// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/* IBM System i and System p Virtual NIC Device Driver */
/* Copyright (C) 2014 IBM Corp. */
/* Santiago Leon (santi_leon@yahoo.com) */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
/* John Allen (jallen@linux.vnet.ibm.com) */
/* */
/* This module contains the implementation of a virtual ethernet device */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor. */
/* */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
/* are used by the driver to notify the server that a packet is */
/* ready for transmission or that a buffer has been added to receive a */
/* packet. Subsequently, sCRQs are used by the server to notify the */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer. */
/* */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
/* which skbs are DMA mapped and immediately unmapped when the transmit */
/* or receive has been completed, the VNIC driver is required to use */
/* "long term mapping". This entails that large, contiguous DMA-mapped */
/* buffers are allocated on driver initialization and these buffers are */
/* then continuously reused to pass skbs to and from the VNIC server. */
/* */
/**************************************************************************/
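
/* Illustrative sketch (not part of the driver): with long term mapping,
 * transmitting a frame reduces to staging it in an already-mapped buffer
 * and posting a descriptor that names the mapping, roughly:
 *
 *	dst = ltb->buff + index * buf_size;	// slot in the mapped buffer
 *	memcpy(dst, skb->data, skb->len);	// stage the frame
 *	desc.ioba = ltb->addr + index * buf_size;
 *	desc.dma_reg = ltb->map_id;		// no per-skb dma_map_single()
 *
 * The buffer is registered with firmware once via send_request_map() and
 * reused for the lifetime of the adapter (see alloc_long_term_buff() and
 * ibmvnic_xmit() below).
 */
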
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
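
/* Example (illustrative): an ethtool stats handler resolves each named
 * counter through these macros, roughly:
 *
 *	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
 *		data[i] = IBMVNIC_GET_STAT(adapter,
 *					   ibmvnic_stats[i].offset);
 *
 * i.e. IBMVNIC_STAT_OFF() records a byte offset into struct
 * ibmvnic_adapter at table-build time and IBMVNIC_GET_STAT()
 * dereferences it as a u64 at read time.
 */
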
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}
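
/* Illustrative note: every command CRQ built in this file follows the same
 * pattern as send_version_xchg() above -- zero the union ibmvnic_crq, set
 * .first to IBMVNIC_CRQ_CMD (or IBMVNIC_CRQ_INIT_CMD for init messages),
 * set .cmd to the opcode, then fill the command-specific fields in
 * big-endian byte order (cpu_to_be16/32/64) before handing the message to
 * ibmvnic_send_crq().
 */
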
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry = 5;

	netdev = adapter->netdev;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (retry--) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}
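
/* Typical caller pattern (illustrative, mirroring the users below):
 *
 *	mutex_lock(&adapter->fw_lock);
 *	adapter->fw_done_rc = 0;
 *	reinit_completion(&adapter->fw_done);
 *	rc = ibmvnic_send_crq(adapter, &crq);	// or send_request_map()
 *	if (!rc)
 *		rc = ibmvnic_wait_for_completion(adapter,
 *						 &adapter->fw_done, 10000);
 *	mutex_unlock(&adapter->fw_lock);
 *
 * fw_lock serializes firmware requests and fw_done_rc carries the return
 * code filled in by the CRQ response handler.
 */
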
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id++;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
		goto out;
	}
	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev,
			"Long term map request aborted or timed out, rc = %d\n",
			rc);
		goto out;
	}
	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		rc = -EIO;
	}
out:
	if (rc)
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
	mutex_unlock(&adapter->fw_lock);
	return rc;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	/* VIOS automatically unmaps the long term buffer at remote
	 * end for the following resets:
	 * FAILOVER, MOBILITY, TIMEOUT.
	 */
	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;

	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_info(dev,
			 "Reset failed, long term map request timed out or aborted\n");
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_info(dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		mutex_unlock(&adapter->fw_lock);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int index;
	int i;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;

	/* netdev_alloc_skb() could have failed after we saved a few skbs
	 * in the indir_buf and we would not have sent them to VIOS yet.
	 * To account for them, start the loop at ind_bufp->index rather
	 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
	 * be 0.
	 */
	for (i = ind_bufp->index; i < count; ++i) {
		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
		memset(sub_crq, 0, sizeof(*sub_crq));
		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq->rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq->rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * discarded.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
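
		/* Worked example (illustrative): with a 4 KiB buffer,
		 * pool->buff_size = 0x1000. On little endian,
		 * 0x1000 << 8 = 0x100000, and cpu_to_be32(0x100000) stores
		 * bytes 00 10 00 00 -- the 24-bit length 0x001000 lands in
		 * the first three bytes, and the low byte that would
		 * otherwise be truncated is zero.
		 */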
		pool->next_free = (pool->next_free + 1) % pool->size;

		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
		    i == count - 1) {
			lpar_rc =
				send_subcrq_indirect(adapter, handle,
						     (u64)ind_bufp->indir_dma,
						     (u64)ind_bufp->index);
			if (lpar_rc != H_SUCCESS)
				goto failure;
			buffers_added += ind_bufp->index;
			adapter->replenish_add_buff_success += ind_bufp->index;
			ind_bufp->index = 0;
		}
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	for (i = ind_bufp->index - 1; i >= 0; --i) {
		struct ibmvnic_rx_buff *rx_buff;

		pool->next_free = pool->next_free == 0 ?
				  pool->size - 1 : pool->next_free - 1;
		sub_crq = &ind_bufp->indir_arr[i];
		rx_buff = (struct ibmvnic_rx_buff *)
				be64_to_cpu(sub_crq->rx_add.correlator);
		index = (int)(rx_buff - pool->rx_buff);
		pool->free_map[pool->next_free] = index;
		dev_kfree_skb_any(pool->rx_buff[index].skb);
		pool->rx_buff[index].skb = NULL;
	}
	adapter->replenish_add_buff_failure += ind_bufp->index;
	atomic_add(buffers_added, &pool->available);
	ind_bufp->index = 0;
	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}

	netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	u64 buff_size;
	int rx_scrqs;
	int i, j, rc;

	if (!adapter->rx_pool)
		return -1;

	buff_size = adapter->cur_rx_buf_sz;
	rx_scrqs = adapter->num_active_rx_pools;
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != buff_size) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}
		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}
	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 buff_size;
	int i, j;

	rxadd_subcrqs = adapter->num_active_rx_scrqs;
	buff_size = adapter->cur_rx_buf_sz;

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   buff_size);

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs, i, rc;

	if (!adapter->tx_pool)
		return -1;

	tx_scrqs = adapter->num_active_tx_pools;
	for (i = 0; i < tx_scrqs; i++) {
		ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}
	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);
	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}

static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	u64 buff_size;
	int i, rc;

	tx_subcrqs = adapter->num_active_tx_scrqs;
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool) {
		kfree(adapter->tx_pool);
		adapter->tx_pool = NULL;
		return -1;
	}

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		buff_size = adapter->req_mtu + VLAN_HLEN;
		buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      buff_size);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

static const char *adapter_state_to_string(enum vnic_state state)

static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(20000);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc)
			return rc;

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			continue;
		}

		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	netdev_dbg(netdev, "[S:%s] Login succeeded\n",
		   adapter_state_to_string(adapter->state));
	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_buffer(adapter);
	release_login_rsp_buffer(adapter);
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(20000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	resend = false;
	do {
		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -ETIMEDOUT;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_query_map(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		ibmvnic_napi_disable(adapter);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending or we are about to reset, just set
	 * device state and return. Device operation will be handled by reset
	 * routine.
	 *
	 * It should be safe to overwrite the adapter->state here. Since
	 * we hold the rtnl, either the reset has not actually started or
	 * the rtnl got dropped during the set_link_state() in do_reset().
	 * In the former case, no one else is changing the state (again we
	 * have the rtnl) and in the latter case, do_reset() will detect and
	 * honor our setting below.
	 */
	if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
		netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
			   adapter_state_to_string(adapter->state),
			   adapter->failover_pending);
		adapter->state = VNIC_OPEN;
		rc = 0;
		goto out;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			goto out;
		}
	}

	rc = __ibmvnic_open(netdev);

out:
	/* If open failed and there is a pending failover or in-progress reset,
	 * set device state and return. Device operation will be handled by
	 * reset routine. See also comments above regarding rtnl.
	 */
	if (rc &&
	    (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
		adapter->state = VNIC_OPEN;
		rc = 0;
	}
	return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (test_bit(0, &adapter->resetting))
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	adapter->state = VNIC_CLOSED;
	return rc;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n",
		   adapter_state_to_string(adapter->state),
		   adapter->failover_pending,
		   adapter->force_reset_recovery);

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);

	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field: bitfield determining needed headers
 * @skb: socket buffer
 * @hdr_len: array of header lengths
 * @hdr_data: buffer to write the header to
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field: bitfield determining needed headers
 * @hdr_data: buffer containing header data
 * @len: length of data buffer
 * @hdr_len: array of individual header lengths
 * @scrq_arr: descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb: tx socket buffer
 * @indir_arr: indirect array
 * @num_entries: number of descriptors to be sent
 * @hdr_field: bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct sk_buff *skb,
				union sub_crq *indir_arr,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	u8 hdr_data[140] = {0};
	int tot_len;

	tot_len = build_hdr_data(hdr_field, skb, hdr_len,
				 hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 indir_arr + 1);
}
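
/* Worked example (illustrative): for a TCP/IPv4 skb with no VLAN tag,
 * build_hdr_data() records hdr_len = {14, 20, 20} and copies 54 bytes of
 * headers. create_hdr_descs() then emits one header descriptor carrying
 * the first 24 bytes plus two extension descriptors (29 + 1 bytes), so
 * *num_entries grows by 3; the descriptors land at indir_arr[1..3],
 * leaving indir_arr[0] free for the main TX descriptor.
 */
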
static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}
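
/* Example (illustrative): if the hypervisor-reported minimum MTU puts
 * netdev->min_mtu at 68, a 60-byte frame is zero-padded to 68 bytes
 * here. skb_put_padto() frees the skb itself on failure, which is why a
 * non-zero return is simply accounted as a drop by the caller.
 */
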
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff;
	struct ibmvnic_tx_pool *tx_pool;
	union sub_crq tx_scrq_entry;
	int queue_num;
	int entries;
	int index;
	int i;

	ind_bufp = &tx_scrq->ind_buf;
	entries = (u64)ind_bufp->index;
	queue_num = tx_scrq->pool_index;

	for (i = entries - 1; i >= 0; --i) {
		tx_scrq_entry = ind_bufp->indir_arr[i];
		if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
			continue;
		index = be32_to_cpu(tx_scrq_entry.v1.correlator);
		if (index & IBMVNIC_TSO_POOL_MASK) {
			tx_pool = &adapter->tso_pool[queue_num];
			index &= ~IBMVNIC_TSO_POOL_MASK;
		} else {
			tx_pool = &adapter->tx_pool[queue_num];
		}
		tx_pool->free_map[tx_pool->consumer_index] = index;
		tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
					  tx_pool->num_buffers - 1 :
					  tx_pool->consumer_index - 1;
		tx_buff = &tx_pool->tx_buff[index];
		adapter->netdev->stats.tx_packets--;
		adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
		adapter->tx_stats_buffers[queue_num].packets--;
		adapter->tx_stats_buffers[queue_num].bytes -=
						tx_buff->skb->len;
		dev_kfree_skb_any(tx_buff->skb);
		tx_buff->skb = NULL;
		adapter->netdev->stats.tx_dropped++;
	}

	ind_bufp->index = 0;

	if (atomic_sub_return(entries, &tx_scrq->used) <=
	    (adapter->req_tx_entries_per_subcrq / 2) &&
	    __netif_subqueue_stopped(adapter->netdev, queue_num) &&
	    !test_bit(0, &adapter->resetting)) {
		netif_wake_subqueue(adapter->netdev, queue_num);
		netdev_dbg(adapter->netdev, "Started queue %d\n",
			   queue_num);
	}
}

static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	u64 dma_addr;
	u64 entries;
	u64 handle;
	int rc;

	ind_bufp = &tx_scrq->ind_buf;
	dma_addr = (u64)ind_bufp->indir_dma;
	entries = (u64)ind_bufp->index;
	handle = tx_scrq->handle;

	if (!entries)
		return 0;
	rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
	if (rc)
		ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
	else
		ind_bufp->index = 0;
	return rc;
}
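
/* Note (illustrative): send_subcrq_indirect() hands the hypervisor the DMA
 * address of ind_bufp->indir_arr along with a descriptor count, so up to
 * IBMVNIC_MAX_IND_DESCS descriptors are posted in one call rather than one
 * call per descriptor. On failure, the staged skbs are unwound by
 * ibmvnic_tx_scrq_clean_buffer() above.
 */
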
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	netdev_tx_t ret = NETDEV_TX_OK;
	unsigned int tx_map_failed = 0;
	union sub_crq indir_arr[16];
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	int index;
	u8 proto = 0;

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, queue_num);
	ind_bufp = &tx_scrq->ind_buf;

	if (test_bit(0, &adapter->resetting)) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}

	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (index == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	offset = index * tx_pool->buf_size;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur, skb_frag_address(frag),
			       skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	/* post changes to long_term_buff *dst before VIOS accessing it */
	dma_wmb();

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}

	if ((*hdrs >> 7) & 1)
		build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);

	tx_crq.v1.n_crq_elem = num_entries;
	tx_buff->num_entries = num_entries;
	/* flush buffer if current entry can not fit */
	if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_flush_err;
	}

	indir_arr[0] = tx_crq;
	memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
	       num_entries * sizeof(struct ibmvnic_generic_scrq));
	ind_bufp->index += num_entries;
	if (__netdev_tx_sent_queue(txq, skb->len,
				   netdev_xmit_more() &&
				   ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_err;
	}

	if (atomic_add_return(num_entries, &tx_scrq->used)
	    >= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;
	goto out;

tx_flush_err:
	dev_kfree_skb_any(skb);
	tx_buff->skb = NULL;
	tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
				  tx_pool->num_buffers - 1 :
				  tx_pool->consumer_index - 1;
	tx_dropped++;
tx_err:
	if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
		dev_err_ratelimited(dev, "tx: send failed\n");

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable TX and report carrier off if queue is closed
		 * or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset or some other action.
		 */
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}
out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;
	int rc;

	if (!is_valid_ether_addr(dev_addr)) {
		rc = -EADDRNOTAVAIL;
		goto err;
	}

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	if (rc || adapter->fw_done_rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
err:
	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	return rc;
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc;

	rc = 0;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(adapter->mac_addr, addr->sa_data);
	if (adapter->state != VNIC_PROBED)
		rc = __ibmvnic_set_mac(netdev, addr->sa_data);

	return rc;
}

static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
{
	switch (reason) {
	case VNIC_RESET_FAILOVER:
		return "FAILOVER";
	case VNIC_RESET_MOBILITY:
		return "MOBILITY";
	case VNIC_RESET_FATAL:
		return "FATAL";
	case VNIC_RESET_NON_FATAL:
		return "NON_FATAL";
	case VNIC_RESET_TIMEOUT:
		return "TIMEOUT";
	case VNIC_RESET_CHANGE_PARAM:
		return "CHANGE_PARAM";
	case VNIC_RESET_PASSIVE_INIT:
		return "PASSIVE_INIT";
	}
	return "UNKNOWN";
}

/*
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	u64 old_num_rx_queues, old_num_tx_queues;
	u64 old_num_rx_slots, old_num_tx_slots;
	struct net_device *netdev = adapter->netdev;
	int rc;

	netdev_dbg(adapter->netdev,
		   "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n",
		   adapter_state_to_string(adapter->state),
		   adapter->failover_pending,
		   reset_reason_to_string(rwi->reset_reason),
		   adapter_state_to_string(reset_state));

	adapter->reset_reason = rwi->reset_reason;
	/* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
	if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
		rtnl_lock();

	/* Now that we have the rtnl lock, clear any pending failover.
	 * This will ensure ibmvnic_open() has either completed or will
	 * block until failover is complete.
	 */
	if (rwi->reset_reason == VNIC_RESET_FAILOVER)
		adapter->failover_pending = false;

	/* read the state and check (again) after getting rtnl */
	reset_state = adapter->state;

	if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
		rc = -EBUSY;
		goto out;
	}

	netif_carrier_off(netdev);

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;
	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;

	ibmvnic_cleanup(netdev);

	if (reset_state == VNIC_OPEN &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			rc = __ibmvnic_close(netdev);
			if (rc)
				goto out;
		} else {
			adapter->state = VNIC_CLOSING;

			/* Release the RTNL lock before link state change and
			 * re-acquire after the link state change to allow
			 * linkwatch_event to grab the RTNL lock and run during
			 * a reset.
			 */
			rtnl_unlock();
			rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
			rtnl_lock();
			if (rc)
				goto out;

			if (adapter->state == VNIC_OPEN) {
				/* When we dropped rtnl, ibmvnic_open() got
				 * it and noticed that we are resetting and
				 * set the adapter state to OPEN. Update our
				 * new "target" state, and resume the reset
				 * from VNIC_CLOSING state.
				 */
				netdev_dbg(netdev,
					   "Open changed state from %s, updating.\n",
					   adapter_state_to_string(reset_state));
				reset_state = VNIC_OPEN;
				adapter->state = VNIC_CLOSING;
			}

			if (adapter->state != VNIC_CLOSING) {
				/* If someone else changed the adapter state
				 * when we dropped the rtnl, fail the reset
				 */
				rc = -EAGAIN;
				goto out;
			}
			adapter->state = VNIC_CLOSED;
		}
	}

	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
		release_resources(adapter);
		release_sub_crqs(adapter, 1);
		release_crq_queue(adapter);
	}

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			rc = init_crq_queue(adapter);
		} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
			rc = ibmvnic_reenable_crq_queue(adapter);
			release_sub_crqs(adapter, 1);
		} else {
			rc = ibmvnic_reset_crq(adapter);
			if (rc == H_CLOSED || rc == H_SUCCESS) {
				rc = vio_enable_interrupts(adapter->vdev);
				if (rc)
					netdev_err(adapter->netdev,
						   "Reset failed to enable interrupts. rc=%d\n",
						   rc);
			}
		}

		if (rc) {
			netdev_err(adapter->netdev,
				   "Reset couldn't initialize crq. rc=%d\n", rc);
			goto out;
		}

		rc = ibmvnic_reset_init(adapter, true);
		if (rc) {
			rc = IBMVNIC_INIT_FAILED;
			goto out;
		}

		/* If the adapter was in PROBE or DOWN state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) {
			rc = 0;
			goto out;
		}

		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
			rc = init_resources(adapter);
			if (rc)
				goto out;
		} else if (adapter->req_rx_queues != old_num_rx_queues ||
			   adapter->req_tx_queues != old_num_tx_queues ||
			   adapter->req_rx_add_entries_per_subcrq !=
			   old_num_rx_slots ||
			   adapter->req_tx_entries_per_subcrq !=
			   old_num_tx_slots ||
			   !adapter->rx_pool ||
			   !adapter->tso_pool ||
			   !adapter->tx_pool) {
			release_rx_pools(adapter);
			release_tx_pools(adapter);
			release_napi(adapter);
			release_vpd_data(adapter);

			rc = init_resources(adapter);
			if (rc)
				goto out;
		} else {
			rc = reset_tx_pools(adapter);
			if (rc) {
				netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
					   rc);
				goto out;
			}

			rc = reset_rx_pools(adapter);
			if (rc) {
				netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
					   rc);
				goto out;
			}
		}
		ibmvnic_disable_irqs(adapter);
	}
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED) {
		rc = 0;
		goto out;
	}

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	/* refresh device's multicast list */
	ibmvnic_set_multi(netdev);

	if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
	    adapter->reset_reason == VNIC_RESET_MOBILITY)
		__netdev_notify_peers(netdev);

	rc = 0;

out:
	/* restore the adapter state if reset failed */
	if (rc)
		adapter->state = reset_state;
	/* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
	if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
		rtnl_unlock();

	netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n",
		   adapter_state_to_string(adapter->state),
		   adapter->failover_pending, rc);

	return rc;
}

2253 static int do_hard_reset(struct ibmvnic_adapter *adapter,
2254 struct ibmvnic_rwi *rwi, u32 reset_state)
2256 struct net_device *netdev = adapter->netdev;
2259 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n",
2260 reset_reason_to_string(rwi->reset_reason));
2262 /* read the state and check (again) after getting rtnl */
2263 reset_state = adapter->state;
2265 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2270 netif_carrier_off(netdev);
2271 adapter->reset_reason = rwi->reset_reason;
2273 ibmvnic_cleanup(netdev);
2274 release_resources(adapter);
2275 release_sub_crqs(adapter, 0);
2276 release_crq_queue(adapter);
2278 /* remove the closed state so when we call open it appears
2279 * we are coming from the probed state.
2280 */
2281 adapter->state = VNIC_PROBED;
2283 reinit_completion(&adapter->init_done);
2284 rc = init_crq_queue(adapter);
2286 netdev_err(adapter->netdev,
2287 "Couldn't initialize crq. rc=%d\n", rc);
2291 rc = ibmvnic_reset_init(adapter, false);
2295 /* If the adapter was in PROBE or DOWN state prior to the reset,
2298 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN)
2301 rc = ibmvnic_login(netdev);
2305 rc = init_resources(adapter);
2309 ibmvnic_disable_irqs(adapter);
2310 adapter->state = VNIC_CLOSED;
2312 if (reset_state == VNIC_CLOSED)
2315 rc = __ibmvnic_open(netdev);
2317 rc = IBMVNIC_OPEN_FAILED;
2321 __netdev_notify_peers(netdev);
2323 /* restore adapter state if reset failed */
2325 adapter->state = reset_state;
2326 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n",
2327 adapter_state_to_string(adapter->state),
2328 adapter->failover_pending, rc);
2332 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2334 struct ibmvnic_rwi *rwi;
2335 unsigned long flags;
2337 spin_lock_irqsave(&adapter->rwi_lock, flags);
2339 if (!list_empty(&adapter->rwi_list)) {
2340 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2342 list_del(&rwi->list);
2343 } else {
2344 rwi = NULL;
2345 }
2347 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2348 return rwi;
2349 }
2351 /**
2352 * do_passive_init - complete probing when partner device is detected.
2353 * @adapter: ibmvnic_adapter struct
2355 * If the ibmvnic device does not have a partner device to communicate with at boot
2356 * and that partner device comes online at a later time, this function is called
2357 * to complete the initialization process of the ibmvnic device.
2358 * Caller is expected to hold rtnl_lock().
2360 * Returns non-zero if sub-CRQs are not initialized properly, leaving the device
2361 * in the down state.
2362 * Returns 0 upon success and the device is in PROBED state.
2363 */
2365 static int do_passive_init(struct ibmvnic_adapter *adapter)
2367 unsigned long timeout = msecs_to_jiffies(30000);
2368 struct net_device *netdev = adapter->netdev;
2369 struct device *dev = &adapter->vdev->dev;
2372 netdev_dbg(netdev, "Partner device found, probing.\n");
2374 adapter->state = VNIC_PROBING;
2375 reinit_completion(&adapter->init_done);
2376 adapter->init_done_rc = 0;
2377 adapter->crq.active = true;
2379 rc = send_crq_init_complete(adapter);
2383 rc = send_version_xchg(adapter);
2385 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);
2387 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
2388 dev_err(dev, "Initialization sequence timed out\n");
2393 rc = init_sub_crqs(adapter);
2395 dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc);
2399 rc = init_sub_crq_irqs(adapter);
2401 dev_err(dev, "Failed to initialize sub crq irqs\n, rc=%d", rc);
2405 netdev->mtu = adapter->req_mtu - ETH_HLEN;
2406 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
2407 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
2409 adapter->state = VNIC_PROBED;
2410 netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n");
2415 release_sub_crqs(adapter, 1);
2417 adapter->state = VNIC_DOWN;
2421 static void __ibmvnic_reset(struct work_struct *work)
2423 struct ibmvnic_rwi *rwi;
2424 struct ibmvnic_adapter *adapter;
2425 bool saved_state = false;
2426 unsigned long flags;
2430 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
2432 if (test_and_set_bit_lock(0, &adapter->resetting)) {
2433 queue_delayed_work(system_long_wq,
2434 &adapter->ibmvnic_delayed_reset,
2435 IBMVNIC_RESET_DELAY);
2439 rwi = get_next_rwi(adapter);
2441 spin_lock_irqsave(&adapter->state_lock, flags);
2443 if (adapter->state == VNIC_REMOVING ||
2444 adapter->state == VNIC_REMOVED) {
2445 spin_unlock_irqrestore(&adapter->state_lock, flags);
2452 reset_state = adapter->state;
2455 spin_unlock_irqrestore(&adapter->state_lock, flags);
2457 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
2459 rc = do_passive_init(adapter);
2462 netif_carrier_on(adapter->netdev);
2463 } else if (adapter->force_reset_recovery) {
2464 /* Since we are doing a hard reset now, clear the
2465 * failover_pending flag so we don't ignore any
2466 * future MOBILITY or other resets.
2467 */
2468 adapter->failover_pending = false;
2470 /* Transport event occurred during previous reset */
2471 if (adapter->wait_for_reset) {
2472 /* Previous was CHANGE_PARAM; caller locked */
2473 adapter->force_reset_recovery = false;
2474 rc = do_hard_reset(adapter, rwi, reset_state);
2477 adapter->force_reset_recovery = false;
2478 rc = do_hard_reset(adapter, rwi, reset_state);
2482 /* give backing device time to settle down */
2483 netdev_dbg(adapter->netdev,
2484 "[S:%s] Hard reset failed, waiting 60 secs\n",
2485 adapter_state_to_string(adapter->state));
2486 set_current_state(TASK_UNINTERRUPTIBLE);
2487 schedule_timeout(60 * HZ);
2490 rc = do_reset(adapter, rwi, reset_state);
2493 adapter->last_reset_time = jiffies;
2496 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
2498 rwi = get_next_rwi(adapter);
2500 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
2501 rwi->reset_reason == VNIC_RESET_MOBILITY))
2502 adapter->force_reset_recovery = true;
2505 if (adapter->wait_for_reset) {
2506 adapter->reset_done_rc = rc;
2507 complete(&adapter->reset_done);
2510 clear_bit_unlock(0, &adapter->resetting);
2512 netdev_dbg(adapter->netdev,
2513 "[S:%s FRR:%d WFR:%d] Done processing resets\n",
2514 adapter_state_to_string(adapter->state),
2515 adapter->force_reset_recovery,
2516 adapter->wait_for_reset);
2519 static void __ibmvnic_delayed_reset(struct work_struct *work)
2521 struct ibmvnic_adapter *adapter;
2523 adapter = container_of(work, struct ibmvnic_adapter,
2524 ibmvnic_delayed_reset.work);
2525 __ibmvnic_reset(&adapter->ibmvnic_reset);
2528 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2529 enum ibmvnic_reset_reason reason)
2531 struct list_head *entry, *tmp_entry;
2532 struct ibmvnic_rwi *rwi, *tmp;
2533 struct net_device *netdev = adapter->netdev;
2534 unsigned long flags;
2537 spin_lock_irqsave(&adapter->rwi_lock, flags);
2539 /* If failover is pending don't schedule any other reset.
2540 * Instead let the failover complete. If there is already a
2541 * failover reset scheduled, we will detect and drop the
2542 * duplicate reset when walking the ->rwi_list below.
2543 */
2544 if (adapter->state == VNIC_REMOVING ||
2545 adapter->state == VNIC_REMOVED ||
2546 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
2548 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
2552 if (adapter->state == VNIC_PROBING) {
2553 netdev_warn(netdev, "Adapter reset during probe\n");
2554 adapter->init_done_rc = EAGAIN;
2559 list_for_each_entry(tmp, &adapter->rwi_list, list) {
2560 if (tmp->reset_reason == reason) {
2561 netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
2562 reset_reason_to_string(reason));
2568 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
2573 /* if we just received a transport event,
2574 * flush reset queue and process this reset
2575 */
2576 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2577 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2580 rwi->reset_reason = reason;
2581 list_add_tail(&rwi->list, &adapter->rwi_list);
2582 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
2583 reset_reason_to_string(reason));
2584 queue_work(system_long_wq, &adapter->ibmvnic_reset);
2588 /* ibmvnic_close() below can block, so drop the lock first */
2589 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2592 ibmvnic_close(netdev);
2597 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
2599 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2601 if (test_bit(0, &adapter->resetting)) {
2602 netdev_err(adapter->netdev,
2603 "Adapter is resetting, skip timeout reset\n");
2606 /* No queuing up reset until at least 5 seconds (default watchdog val)
2607 * after last reset
2608 */
2609 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
2610 netdev_dbg(dev, "Not yet time to tx timeout.\n");
2613 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
2616 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2617 struct ibmvnic_rx_buff *rx_buff)
2619 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2621 rx_buff->skb = NULL;
2623 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2624 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2626 atomic_dec(&pool->available);
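/*
 * Worked example of the free_map ring above (values illustrative):
 * with pool->size = 4 and pool->next_alloc = 3, returning buffer
 * index 7 stores free_map[3] = 7 and wraps next_alloc back to 0.
 * The same arithmetic in miniature, as a standalone sketch that is
 * not part of the driver:
 */
static inline void __maybe_unused example_free_map_put(int *free_map,
						       int *next_alloc,
						       int size, int index)
{
	free_map[*next_alloc] = index;		/* record the freed slot */
	*next_alloc = (*next_alloc + 1) % size;	/* advance, wrapping */
}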
2629 static int ibmvnic_poll(struct napi_struct *napi, int budget)
2631 struct ibmvnic_sub_crq_queue *rx_scrq;
2632 struct ibmvnic_adapter *adapter;
2633 struct net_device *netdev;
2634 int frames_processed;
2638 adapter = netdev_priv(netdev);
2639 scrq_num = (int)(napi - adapter->napi);
2640 frames_processed = 0;
2641 rx_scrq = adapter->rx_scrq[scrq_num];
2644 while (frames_processed < budget) {
2645 struct sk_buff *skb;
2646 struct ibmvnic_rx_buff *rx_buff;
2647 union sub_crq *next;
2652 if (unlikely(test_bit(0, &adapter->resetting) &&
2653 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2654 enable_scrq_irq(adapter, rx_scrq);
2655 napi_complete_done(napi, frames_processed);
2656 return frames_processed;
2659 if (!pending_scrq(adapter, rx_scrq))
2660 break;
2661 next = ibmvnic_next_scrq(adapter, rx_scrq);
2662 rx_buff = (struct ibmvnic_rx_buff *)
2663 be64_to_cpu(next->rx_comp.correlator);
2664 /* do error checking */
2665 if (next->rx_comp.rc) {
2666 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2667 be16_to_cpu(next->rx_comp.rc));
2668 /* free the entry */
2669 next->rx_comp.first = 0;
2670 dev_kfree_skb_any(rx_buff->skb);
2671 remove_buff_from_pool(adapter, rx_buff);
2673 } else if (!rx_buff->skb) {
2674 /* free the entry */
2675 next->rx_comp.first = 0;
2676 remove_buff_from_pool(adapter, rx_buff);
2680 length = be32_to_cpu(next->rx_comp.len);
2681 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2682 flags = next->rx_comp.flags;
2684 /* load long_term_buff before copying to skb */
2685 dma_rmb();
2686 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2689 /* VLAN Header has been stripped by the system firmware and
2690 * needs to be inserted by the driver
2691 */
2692 if (adapter->rx_vlan_header_insertion &&
2693 (flags & IBMVNIC_VLAN_STRIPPED))
2694 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2695 ntohs(next->rx_comp.vlan_tci));
2697 /* free the entry */
2698 next->rx_comp.first = 0;
2699 remove_buff_from_pool(adapter, rx_buff);
2701 skb_put(skb, length);
2702 skb->protocol = eth_type_trans(skb, netdev);
2703 skb_record_rx_queue(skb, scrq_num);
2705 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2706 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2707 skb->ip_summed = CHECKSUM_UNNECESSARY;
2711 napi_gro_receive(napi, skb); /* send it up */
2712 netdev->stats.rx_packets++;
2713 netdev->stats.rx_bytes += length;
2714 adapter->rx_stats_buffers[scrq_num].packets++;
2715 adapter->rx_stats_buffers[scrq_num].bytes += length;
2719 if (adapter->state != VNIC_CLOSING &&
2720 ((atomic_read(&adapter->rx_pool[scrq_num].available) <
2721 adapter->req_rx_add_entries_per_subcrq / 2) ||
2722 frames_processed < budget))
2723 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2724 if (frames_processed < budget) {
2725 if (napi_complete_done(napi, frames_processed)) {
2726 enable_scrq_irq(adapter, rx_scrq);
2727 if (pending_scrq(adapter, rx_scrq)) {
2728 if (napi_reschedule(napi)) {
2729 disable_scrq_irq(adapter, rx_scrq);
2735 return frames_processed;
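/*
 * NAPI contract as used above: returning the full budget keeps this
 * queue in polling mode with its interrupt disabled, while returning
 * fewer frames lets napi_complete_done() re-arm the interrupt. The
 * pending_scrq() recheck after the re-arm catches a completion that
 * raced in before the interrupt was enabled; napi_reschedule() then
 * disables the interrupt again and polling resumes.
 */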
2738 static int wait_for_reset(struct ibmvnic_adapter *adapter)
2742 adapter->fallback.mtu = adapter->req_mtu;
2743 adapter->fallback.rx_queues = adapter->req_rx_queues;
2744 adapter->fallback.tx_queues = adapter->req_tx_queues;
2745 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2746 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2748 reinit_completion(&adapter->reset_done);
2749 adapter->wait_for_reset = true;
2750 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2756 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2763 if (adapter->reset_done_rc) {
2765 adapter->desired.mtu = adapter->fallback.mtu;
2766 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2767 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2768 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2769 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2771 reinit_completion(&adapter->reset_done);
2772 adapter->wait_for_reset = true;
2773 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2778 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
2786 adapter->wait_for_reset = false;
2791 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2793 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2795 adapter->desired.mtu = new_mtu + ETH_HLEN;
2797 return wait_for_reset(adapter);
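/*
 * Example: "ip link set <dev> mtu 9000" (device name illustrative)
 * arrives here with new_mtu = 9000, so desired.mtu becomes 9014 once
 * the 14-byte ETH_HLEN is added, and the CHANGE_PARAM reset
 * renegotiates REQ_MTU with the VNIC server before the new size
 * takes effect.
 */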
2800 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2801 struct net_device *dev,
2802 netdev_features_t features)
2804 /* Some backing hardware adapters cannot
2805 * handle packets with an MSS less than 224
2806 * or with only one segment.
2807 */
2808 if (skb_is_gso(skb)) {
2809 if (skb_shinfo(skb)->gso_size < 224 ||
2810 skb_shinfo(skb)->gso_segs == 1)
2811 features &= ~NETIF_F_GSO_MASK;
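/*
 * Worked example: a GSO skb with gso_size = 200 (below the 224-byte
 * minimum) has its GSO feature bits cleared here, so the stack
 * segments the packet in software rather than handing the backing
 * adapter a large send it cannot process.
 */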
2817 static const struct net_device_ops ibmvnic_netdev_ops = {
2818 .ndo_open = ibmvnic_open,
2819 .ndo_stop = ibmvnic_close,
2820 .ndo_start_xmit = ibmvnic_xmit,
2821 .ndo_set_rx_mode = ibmvnic_set_multi,
2822 .ndo_set_mac_address = ibmvnic_set_mac,
2823 .ndo_validate_addr = eth_validate_addr,
2824 .ndo_tx_timeout = ibmvnic_tx_timeout,
2825 .ndo_change_mtu = ibmvnic_change_mtu,
2826 .ndo_features_check = ibmvnic_features_check,
2829 /* ethtool functions */
2831 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2832 struct ethtool_link_ksettings *cmd)
2834 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2837 rc = send_query_phys_parms(adapter);
2839 adapter->speed = SPEED_UNKNOWN;
2840 adapter->duplex = DUPLEX_UNKNOWN;
2842 cmd->base.speed = adapter->speed;
2843 cmd->base.duplex = adapter->duplex;
2844 cmd->base.port = PORT_FIBRE;
2845 cmd->base.phy_address = 0;
2846 cmd->base.autoneg = AUTONEG_ENABLE;
2851 static void ibmvnic_get_drvinfo(struct net_device *netdev,
2852 struct ethtool_drvinfo *info)
2854 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2856 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2857 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2858 strscpy(info->fw_version, adapter->fw_version,
2859 sizeof(info->fw_version));
2862 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2864 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2866 return adapter->msg_enable;
2869 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2871 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2873 adapter->msg_enable = data;
2876 static u32 ibmvnic_get_link(struct net_device *netdev)
2878 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2880 /* Don't need to send a query because we request a logical link up at
2881 * init and then we wait for link state indications
2882 */
2883 return adapter->logical_link_state;
2886 static void ibmvnic_get_ringparam(struct net_device *netdev,
2887 struct ethtool_ringparam *ring)
2889 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2891 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2892 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2893 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2895 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2896 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2898 ring->rx_mini_max_pending = 0;
2899 ring->rx_jumbo_max_pending = 0;
2900 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2901 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2902 ring->rx_mini_pending = 0;
2903 ring->rx_jumbo_pending = 0;
2906 static int ibmvnic_set_ringparam(struct net_device *netdev,
2907 struct ethtool_ringparam *ring)
2909 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2913 adapter->desired.rx_entries = ring->rx_pending;
2914 adapter->desired.tx_entries = ring->tx_pending;
2916 ret = wait_for_reset(adapter);
2918 if (!ret &&
2919 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2920 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2922 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2923 ring->rx_pending, ring->tx_pending,
2924 adapter->req_rx_add_entries_per_subcrq,
2925 adapter->req_tx_entries_per_subcrq);
2929 static void ibmvnic_get_channels(struct net_device *netdev,
2930 struct ethtool_channels *channels)
2932 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2934 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2935 channels->max_rx = adapter->max_rx_queues;
2936 channels->max_tx = adapter->max_tx_queues;
2938 channels->max_rx = IBMVNIC_MAX_QUEUES;
2939 channels->max_tx = IBMVNIC_MAX_QUEUES;
2942 channels->max_other = 0;
2943 channels->max_combined = 0;
2944 channels->rx_count = adapter->req_rx_queues;
2945 channels->tx_count = adapter->req_tx_queues;
2946 channels->other_count = 0;
2947 channels->combined_count = 0;
2950 static int ibmvnic_set_channels(struct net_device *netdev,
2951 struct ethtool_channels *channels)
2953 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2957 adapter->desired.rx_queues = channels->rx_count;
2958 adapter->desired.tx_queues = channels->tx_count;
2960 ret = wait_for_reset(adapter);
2962 if (!ret &&
2963 (adapter->req_rx_queues != channels->rx_count ||
2964 adapter->req_tx_queues != channels->tx_count))
2966 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2967 channels->rx_count, channels->tx_count,
2968 adapter->req_rx_queues, adapter->req_tx_queues);
2972 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2974 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2977 switch (stringset) {
2979 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2980 i++, data += ETH_GSTRING_LEN)
2981 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2983 for (i = 0; i < adapter->req_tx_queues; i++) {
2984 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2985 data += ETH_GSTRING_LEN;
2987 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2988 data += ETH_GSTRING_LEN;
2990 snprintf(data, ETH_GSTRING_LEN,
2991 "tx%d_dropped_packets", i);
2992 data += ETH_GSTRING_LEN;
2995 for (i = 0; i < adapter->req_rx_queues; i++) {
2996 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2997 data += ETH_GSTRING_LEN;
2999 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
3000 data += ETH_GSTRING_LEN;
3002 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
3003 data += ETH_GSTRING_LEN;
3007 case ETH_SS_PRIV_FLAGS:
3008 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
3009 strcpy(data + i * ETH_GSTRING_LEN,
3010 ibmvnic_priv_flags[i]);
3017 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
3019 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3023 return ARRAY_SIZE(ibmvnic_stats) +
3024 adapter->req_tx_queues * NUM_TX_STATS +
3025 adapter->req_rx_queues * NUM_RX_STATS;
3026 case ETH_SS_PRIV_FLAGS:
3027 return ARRAY_SIZE(ibmvnic_priv_flags);
3033 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
3034 struct ethtool_stats *stats, u64 *data)
3036 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3037 union ibmvnic_crq crq;
3041 memset(&crq, 0, sizeof(crq));
3042 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
3043 crq.request_statistics.cmd = REQUEST_STATISTICS;
3044 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
3045 crq.request_statistics.len =
3046 cpu_to_be32(sizeof(struct ibmvnic_statistics));
3048 /* Wait for data to be written */
3049 reinit_completion(&adapter->stats_done);
3050 rc = ibmvnic_send_crq(adapter, &crq);
3053 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
3057 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
3058 data[i] = be64_to_cpu(IBMVNIC_GET_STAT
3059 (adapter, ibmvnic_stats[i].offset));
3061 for (j = 0; j < adapter->req_tx_queues; j++) {
3062 data[i] = adapter->tx_stats_buffers[j].packets;
3064 data[i] = adapter->tx_stats_buffers[j].bytes;
3066 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
3070 for (j = 0; j < adapter->req_rx_queues; j++) {
3071 data[i] = adapter->rx_stats_buffers[j].packets;
3073 data[i] = adapter->rx_stats_buffers[j].bytes;
3075 data[i] = adapter->rx_stats_buffers[j].interrupts;
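/*
 * Resulting data[] layout, e.g. with req_tx_queues = 2 and
 * req_rx_queues = 2: the ARRAY_SIZE(ibmvnic_stats) device-wide
 * counters come first, then tx0 packets/bytes/dropped_packets and
 * tx1 likewise, then rx0 packets/bytes/interrupts and rx1 likewise,
 * matching the string order built in ibmvnic_get_strings() above.
 */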
3080 static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
3082 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3084 return adapter->priv_flags;
3087 static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
3089 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3090 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
3093 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
3095 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
3100 static const struct ethtool_ops ibmvnic_ethtool_ops = {
3101 .get_drvinfo = ibmvnic_get_drvinfo,
3102 .get_msglevel = ibmvnic_get_msglevel,
3103 .set_msglevel = ibmvnic_set_msglevel,
3104 .get_link = ibmvnic_get_link,
3105 .get_ringparam = ibmvnic_get_ringparam,
3106 .set_ringparam = ibmvnic_set_ringparam,
3107 .get_channels = ibmvnic_get_channels,
3108 .set_channels = ibmvnic_set_channels,
3109 .get_strings = ibmvnic_get_strings,
3110 .get_sset_count = ibmvnic_get_sset_count,
3111 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
3112 .get_link_ksettings = ibmvnic_get_link_ksettings,
3113 .get_priv_flags = ibmvnic_get_priv_flags,
3114 .set_priv_flags = ibmvnic_set_priv_flags,
3117 /* Routines for managing CRQs/sCRQs */
3119 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
3120 struct ibmvnic_sub_crq_queue *scrq)
3125 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
3130 free_irq(scrq->irq, scrq);
3131 irq_dispose_mapping(scrq->irq);
3136 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
3137 atomic_set(&scrq->used, 0);
3139 scrq->ind_buf.index = 0;
3141 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
3145 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3146 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3150 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
3154 if (!adapter->tx_scrq || !adapter->rx_scrq)
3157 for (i = 0; i < adapter->req_tx_queues; i++) {
3158 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
3159 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3164 for (i = 0; i < adapter->req_rx_queues; i++) {
3165 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
3166 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
3174 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
3175 struct ibmvnic_sub_crq_queue *scrq,
3178 struct device *dev = &adapter->vdev->dev;
3181 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
3184 /* Close the sub-crqs */
3186 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3187 adapter->vdev->unit_address,
3189 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3192 netdev_err(adapter->netdev,
3193 "Failed to release sub-CRQ %16lx, rc = %ld\n",
3198 dma_free_coherent(dev,
3200 scrq->ind_buf.indir_arr,
3201 scrq->ind_buf.indir_dma);
3203 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3205 free_pages((unsigned long)scrq->msgs, 2);
3209 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
3212 struct device *dev = &adapter->vdev->dev;
3213 struct ibmvnic_sub_crq_queue *scrq;
3216 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
3221 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
3223 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
3224 goto zero_page_failed;
3227 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
3229 if (dma_mapping_error(dev, scrq->msg_token)) {
3230 dev_warn(dev, "Couldn't map crq queue messages page\n");
3234 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3235 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3237 if (rc == H_RESOURCE)
3238 rc = ibmvnic_reset_crq(adapter);
3240 if (rc == H_CLOSED) {
3241 dev_warn(dev, "Partner adapter not ready, waiting.\n");
3243 dev_warn(dev, "Error %d registering sub-crq\n", rc);
3247 scrq->adapter = adapter;
3248 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
3249 scrq->ind_buf.index = 0;
3251 scrq->ind_buf.indir_arr =
3252 dma_alloc_coherent(dev,
3254 &scrq->ind_buf.indir_dma,
3257 if (!scrq->ind_buf.indir_arr)
3260 spin_lock_init(&scrq->lock);
3262 netdev_dbg(adapter->netdev,
3263 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
3264 scrq->crq_num, scrq->hw_irq, scrq->irq);
3270 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3271 adapter->vdev->unit_address,
3273 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3275 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3278 free_pages((unsigned long)scrq->msgs, 2);
3285 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
3289 if (adapter->tx_scrq) {
3290 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
3291 if (!adapter->tx_scrq[i])
3294 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
3296 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
3297 if (adapter->tx_scrq[i]->irq) {
3298 free_irq(adapter->tx_scrq[i]->irq,
3299 adapter->tx_scrq[i]);
3300 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
3301 adapter->tx_scrq[i]->irq = 0;
3304 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
3308 kfree(adapter->tx_scrq);
3309 adapter->tx_scrq = NULL;
3310 adapter->num_active_tx_scrqs = 0;
3313 if (adapter->rx_scrq) {
3314 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
3315 if (!adapter->rx_scrq[i])
3318 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3320 if (adapter->rx_scrq[i]->irq) {
3321 free_irq(adapter->rx_scrq[i]->irq,
3322 adapter->rx_scrq[i]);
3323 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
3324 adapter->rx_scrq[i]->irq = 0;
3327 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3331 kfree(adapter->rx_scrq);
3332 adapter->rx_scrq = NULL;
3333 adapter->num_active_rx_scrqs = 0;
3337 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3338 struct ibmvnic_sub_crq_queue *scrq)
3340 struct device *dev = &adapter->vdev->dev;
3343 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3344 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3346 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3351 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3352 struct ibmvnic_sub_crq_queue *scrq)
3354 struct device *dev = &adapter->vdev->dev;
3357 if (scrq->hw_irq > 0x100000000ULL) {
3358 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3362 if (test_bit(0, &adapter->resetting) &&
3363 adapter->reset_reason == VNIC_RESET_MOBILITY) {
3364 u64 val = (0xff000000) | scrq->hw_irq;
3366 rc = plpar_hcall_norets(H_EOI, val);
3367 /* H_EOI would fail with rc = H_FUNCTION when running
3368 * in XIVE mode which is expected, but not an error.
3369 */
3370 if (rc && (rc != H_FUNCTION))
3371 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3375 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3376 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3378 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3383 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3384 struct ibmvnic_sub_crq_queue *scrq)
3386 struct device *dev = &adapter->vdev->dev;
3387 struct ibmvnic_tx_pool *tx_pool;
3388 struct ibmvnic_tx_buff *txbuff;
3389 struct netdev_queue *txq;
3390 union sub_crq *next;
3395 while (pending_scrq(adapter, scrq)) {
3396 unsigned int pool = scrq->pool_index;
3397 int num_entries = 0;
3398 int total_bytes = 0;
3399 int num_packets = 0;
3401 next = ibmvnic_next_scrq(adapter, scrq);
3402 for (i = 0; i < next->tx_comp.num_comps; i++) {
3403 index = be32_to_cpu(next->tx_comp.correlators[i]);
3404 if (index & IBMVNIC_TSO_POOL_MASK) {
3405 tx_pool = &adapter->tso_pool[pool];
3406 index &= ~IBMVNIC_TSO_POOL_MASK;
3408 tx_pool = &adapter->tx_pool[pool];
3411 txbuff = &tx_pool->tx_buff[index];
3413 num_entries += txbuff->num_entries;
3415 total_bytes += txbuff->skb->len;
3416 if (next->tx_comp.rcs[i]) {
3417 dev_err(dev, "tx error %x\n",
3418 next->tx_comp.rcs[i]);
3419 dev_kfree_skb_irq(txbuff->skb);
3421 dev_consume_skb_irq(txbuff->skb);
3425 netdev_warn(adapter->netdev,
3426 "TX completion received with NULL socket buffer\n");
3428 tx_pool->free_map[tx_pool->producer_index] = index;
3429 tx_pool->producer_index =
3430 (tx_pool->producer_index + 1) %
3431 tx_pool->num_buffers;
3433 /* remove tx_comp scrq */
3434 next->tx_comp.first = 0;
3436 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
3437 netdev_tx_completed_queue(txq, num_packets, total_bytes);
3439 if (atomic_sub_return(num_entries, &scrq->used) <=
3440 (adapter->req_tx_entries_per_subcrq / 2) &&
3441 __netif_subqueue_stopped(adapter->netdev,
3442 scrq->pool_index)) {
3443 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
3444 netdev_dbg(adapter->netdev, "Started queue %d\n",
3449 enable_scrq_irq(adapter, scrq);
3451 if (pending_scrq(adapter, scrq)) {
3452 disable_scrq_irq(adapter, scrq);
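/*
 * Note the enable-then-recheck pattern above: a completion can land
 * on the sub-CRQ between the final pending_scrq() check and
 * enable_scrq_irq(), and such an entry would raise no interrupt.
 * Rechecking after the enable, then disabling and restarting the
 * loop if work appeared, closes that window.
 */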
3459 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3461 struct ibmvnic_sub_crq_queue *scrq = instance;
3462 struct ibmvnic_adapter *adapter = scrq->adapter;
3464 disable_scrq_irq(adapter, scrq);
3465 ibmvnic_complete_tx(adapter, scrq);
3470 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3472 struct ibmvnic_sub_crq_queue *scrq = instance;
3473 struct ibmvnic_adapter *adapter = scrq->adapter;
3475 /* When booting a kdump kernel we can hit pending interrupts
3476 * prior to completing driver initialization.
3477 */
3478 if (unlikely(adapter->state != VNIC_OPEN))
3481 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3483 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3484 disable_scrq_irq(adapter, scrq);
3485 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3491 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3493 struct device *dev = &adapter->vdev->dev;
3494 struct ibmvnic_sub_crq_queue *scrq;
3498 for (i = 0; i < adapter->req_tx_queues; i++) {
3499 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3501 scrq = adapter->tx_scrq[i];
3502 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3506 dev_err(dev, "Error mapping irq\n");
3507 goto req_tx_irq_failed;
3510 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3511 adapter->vdev->unit_address, i);
3512 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
3513 0, scrq->name, scrq);
3516 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3518 irq_dispose_mapping(scrq->irq);
3519 goto req_tx_irq_failed;
3523 for (i = 0; i < adapter->req_rx_queues; i++) {
3524 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3526 scrq = adapter->rx_scrq[i];
3527 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3530 dev_err(dev, "Error mapping irq\n");
3531 goto req_rx_irq_failed;
3533 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3534 adapter->vdev->unit_address, i);
3535 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
3536 0, scrq->name, scrq);
3538 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3540 irq_dispose_mapping(scrq->irq);
3541 goto req_rx_irq_failed;
3547 for (j = 0; j < i; j++) {
3548 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3549 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
3551 i = adapter->req_tx_queues;
3553 for (j = 0; j < i; j++) {
3554 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
3555 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
3557 release_sub_crqs(adapter, 1);
3561 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
3563 struct device *dev = &adapter->vdev->dev;
3564 struct ibmvnic_sub_crq_queue **allqueues;
3565 int registered_queues = 0;
3570 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3572 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
3576 for (i = 0; i < total_queues; i++) {
3577 allqueues[i] = init_sub_crq_queue(adapter);
3578 if (!allqueues[i]) {
3579 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3582 registered_queues++;
3585 /* Make sure we were able to register the minimum number of queues */
3586 if (registered_queues <
3587 adapter->min_tx_queues + adapter->min_rx_queues) {
3588 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3592 /* Distribute the allocation shortfall across the rx and tx queue requests */
3593 for (i = 0; i < total_queues - registered_queues + more; i++) {
3594 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3597 if (adapter->req_rx_queues > adapter->min_rx_queues)
3598 adapter->req_rx_queues--;
3603 if (adapter->req_tx_queues > adapter->min_tx_queues)
3604 adapter->req_tx_queues--;
3611 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3612 sizeof(*adapter->tx_scrq), GFP_KERNEL);
3613 if (!adapter->tx_scrq)
3616 for (i = 0; i < adapter->req_tx_queues; i++) {
3617 adapter->tx_scrq[i] = allqueues[i];
3618 adapter->tx_scrq[i]->pool_index = i;
3619 adapter->num_active_tx_scrqs++;
3622 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3623 sizeof(*adapter->rx_scrq), GFP_KERNEL);
3624 if (!adapter->rx_scrq)
3627 for (i = 0; i < adapter->req_rx_queues; i++) {
3628 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3629 adapter->rx_scrq[i]->scrq_num = i;
3630 adapter->num_active_rx_scrqs++;
3637 kfree(adapter->tx_scrq);
3638 adapter->tx_scrq = NULL;
3640 for (i = 0; i < registered_queues; i++)
3641 release_sub_crq_queue(adapter, allqueues[i], 1);
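/*
 * Example of the queue-reduction loop in init_sub_crqs() above: if
 * ten sub-CRQs were requested (say six tx and four rx) but only
 * seven registered, the loop runs three times, trimming the rx and
 * tx requests in turn while respecting the min_rx_queues and
 * min_tx_queues floors; passes that cannot trim bump `more` so the
 * loop extends until the requests match what was actually allocated.
 */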
3646 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
3648 struct device *dev = &adapter->vdev->dev;
3649 union ibmvnic_crq crq;
3653 /* Sub-CRQ entries are 32 bytes long */
3654 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3656 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3657 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3658 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3662 if (adapter->desired.mtu)
3663 adapter->req_mtu = adapter->desired.mtu;
3665 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3667 if (!adapter->desired.tx_entries)
3668 adapter->desired.tx_entries =
3669 adapter->max_tx_entries_per_subcrq;
3670 if (!adapter->desired.rx_entries)
3671 adapter->desired.rx_entries =
3672 adapter->max_rx_add_entries_per_subcrq;
3674 max_entries = IBMVNIC_MAX_LTB_SIZE /
3675 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3677 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3678 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3679 adapter->desired.tx_entries = max_entries;
3682 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3683 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3684 adapter->desired.rx_entries = max_entries;
3687 if (adapter->desired.tx_entries)
3688 adapter->req_tx_entries_per_subcrq =
3689 adapter->desired.tx_entries;
3691 adapter->req_tx_entries_per_subcrq =
3692 adapter->max_tx_entries_per_subcrq;
3694 if (adapter->desired.rx_entries)
3695 adapter->req_rx_add_entries_per_subcrq =
3696 adapter->desired.rx_entries;
3698 adapter->req_rx_add_entries_per_subcrq =
3699 adapter->max_rx_add_entries_per_subcrq;
3701 if (adapter->desired.tx_queues)
3702 adapter->req_tx_queues =
3703 adapter->desired.tx_queues;
3705 adapter->req_tx_queues =
3706 adapter->opt_tx_comp_sub_queues;
3708 if (adapter->desired.rx_queues)
3709 adapter->req_rx_queues =
3710 adapter->desired.rx_queues;
3712 adapter->req_rx_queues =
3713 adapter->opt_rx_comp_queues;
3715 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3718 memset(&crq, 0, sizeof(crq));
3719 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3720 crq.request_capability.cmd = REQUEST_CAPABILITY;
3722 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3723 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3724 atomic_inc(&adapter->running_cap_crqs);
3725 ibmvnic_send_crq(adapter, &crq);
3727 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3728 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3729 atomic_inc(&adapter->running_cap_crqs);
3730 ibmvnic_send_crq(adapter, &crq);
3732 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3733 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3734 atomic_inc(&adapter->running_cap_crqs);
3735 ibmvnic_send_crq(adapter, &crq);
3737 crq.request_capability.capability =
3738 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3739 crq.request_capability.number =
3740 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3741 atomic_inc(&adapter->running_cap_crqs);
3742 ibmvnic_send_crq(adapter, &crq);
3744 crq.request_capability.capability =
3745 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3746 crq.request_capability.number =
3747 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3748 atomic_inc(&adapter->running_cap_crqs);
3749 ibmvnic_send_crq(adapter, &crq);
3751 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3752 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3753 atomic_inc(&adapter->running_cap_crqs);
3754 ibmvnic_send_crq(adapter, &crq);
3756 if (adapter->netdev->flags & IFF_PROMISC) {
3757 if (adapter->promisc_supported) {
3758 crq.request_capability.capability =
3759 cpu_to_be16(PROMISC_REQUESTED);
3760 crq.request_capability.number = cpu_to_be64(1);
3761 atomic_inc(&adapter->running_cap_crqs);
3762 ibmvnic_send_crq(adapter, &crq);
3765 crq.request_capability.capability =
3766 cpu_to_be16(PROMISC_REQUESTED);
3767 crq.request_capability.number = cpu_to_be64(0);
3768 atomic_inc(&adapter->running_cap_crqs);
3769 ibmvnic_send_crq(adapter, &crq);
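/*
 * Each request above increments running_cap_crqs before the CRQ is
 * sent; the capability response handlers elsewhere in the driver
 * decrement it, and only when the count drains back to zero does
 * initialization advance to the next phase.
 */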
3773 static int pending_scrq(struct ibmvnic_adapter *adapter,
3774 struct ibmvnic_sub_crq_queue *scrq)
3776 union sub_crq *entry = &scrq->msgs[scrq->cur];
3779 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
3781 /* Ensure that the SCRQ valid flag is loaded prior to loading the
3782 * contents of the SCRQ descriptor
3783 */
3784 dma_rmb();
3786 return rc;
3787 }
3789 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3790 struct ibmvnic_sub_crq_queue *scrq)
3792 union sub_crq *entry;
3793 unsigned long flags;
3795 spin_lock_irqsave(&scrq->lock, flags);
3796 entry = &scrq->msgs[scrq->cur];
3797 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3798 if (++scrq->cur == scrq->size)
3799 scrq->cur = 0;
3800 } else {
3801 entry = NULL;
3802 }
3803 spin_unlock_irqrestore(&scrq->lock, flags);
3805 /* Ensure that the SCRQ valid flag is loaded prior to loading the
3806 * contents of the SCRQ descriptor
3807 */
3808 dma_rmb();
3810 return entry;
3811 }
3813 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3815 struct ibmvnic_crq_queue *queue = &adapter->crq;
3816 union ibmvnic_crq *crq;
3818 crq = &queue->msgs[queue->cur];
3819 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3820 if (++queue->cur == queue->size)
3821 queue->cur = 0;
3822 } else {
3823 crq = NULL;
3824 }
3826 return crq;
3827 }
3829 static void print_subcrq_error(struct device *dev, int rc, const char *func)
3833 dev_warn_ratelimited(dev,
3834 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3838 dev_warn_ratelimited(dev,
3839 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3843 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3848 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3849 u64 remote_handle, u64 ioba, u64 num_entries)
3851 unsigned int ua = adapter->vdev->unit_address;
3852 struct device *dev = &adapter->vdev->dev;
3855 /* Make sure the hypervisor sees the complete request */
3856 dma_wmb();
3857 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3858 cpu_to_be64(remote_handle),
3859 ioba, num_entries);
3861 if (rc)
3862 print_subcrq_error(dev, rc, __func__);
3867 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3868 union ibmvnic_crq *crq)
3870 unsigned int ua = adapter->vdev->unit_address;
3871 struct device *dev = &adapter->vdev->dev;
3872 u64 *u64_crq = (u64 *)crq;
3875 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3876 (unsigned long)cpu_to_be64(u64_crq[0]),
3877 (unsigned long)cpu_to_be64(u64_crq[1]));
3879 if (!adapter->crq.active &&
3880 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3881 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3885 /* Make sure the hypervisor sees the complete request */
3886 dma_wmb();
3888 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3889 cpu_to_be64(u64_crq[0]),
3890 cpu_to_be64(u64_crq[1]));
3893 if (rc == H_CLOSED) {
3894 dev_warn(dev, "CRQ Queue closed\n");
3895 /* do not reset, report the fail, wait for passive init from server */
3898 dev_warn(dev, "Send error (rc=%d)\n", rc);
3904 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3906 struct device *dev = &adapter->vdev->dev;
3907 union ibmvnic_crq crq;
3911 memset(&crq, 0, sizeof(crq));
3912 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3913 crq.generic.cmd = IBMVNIC_CRQ_INIT;
3914 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3917 rc = ibmvnic_send_crq(adapter, &crq);
3923 } while (retries > 0);
3926 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
3933 struct vnic_login_client_data {
3939 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3943 /* Calculate the amount of buffer space needed for the
3944 * vnic client data in the login buffer. There are four entries,
3945 * OS name, LPAR name, device name, and a null last entry.
3946 */
3947 len = 4 * sizeof(struct vnic_login_client_data);
3948 len += 6; /* "Linux" plus NUL terminator */
3949 len += strlen(utsname()->nodename) + 1;
3950 len += strlen(adapter->netdev->name) + 1;
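/*
 * Worked example, with an assumed nodename of "lpar1" and netdev
 * name "eth0" (both illustrative): len = 4 * sizeof(struct
 * vnic_login_client_data) + 6 ("Linux" + NUL) + 6 ("lpar1" + NUL)
 * + 5 ("eth0" + NUL).
 */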
3955 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3956 struct vnic_login_client_data *vlcd)
3958 const char *os_name = "Linux";
3961 /* Type 1 - LPAR OS */
3963 len = strlen(os_name) + 1;
3964 vlcd->len = cpu_to_be16(len);
3965 strscpy(vlcd->name, os_name, len);
3966 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3968 /* Type 2 - LPAR name */
3970 len = strlen(utsname()->nodename) + 1;
3971 vlcd->len = cpu_to_be16(len);
3972 strscpy(vlcd->name, utsname()->nodename, len);
3973 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3975 /* Type 3 - device name */
3977 len = strlen(adapter->netdev->name) + 1;
3978 vlcd->len = cpu_to_be16(len);
3979 strscpy(vlcd->name, adapter->netdev->name, len);
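/*
 * The buffer built above is three back-to-back variable-length
 * records, each a vnic_login_client_data header followed by its
 * string, e.g.:
 *
 *   [type=1 len=6 "Linux\0"][type=2 len=N "<nodename>\0"]
 *   [type=3 len=M "<netdev name>\0"]
 *
 * with vlcd advanced past each string to the start of the next
 * record.
 */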
3982 static int send_login(struct ibmvnic_adapter *adapter)
3984 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3985 struct ibmvnic_login_buffer *login_buffer;
3986 struct device *dev = &adapter->vdev->dev;
3987 struct vnic_login_client_data *vlcd;
3988 dma_addr_t rsp_buffer_token;
3989 dma_addr_t buffer_token;
3990 size_t rsp_buffer_size;
3991 union ibmvnic_crq crq;
3992 int client_data_len;
3999 if (!adapter->tx_scrq || !adapter->rx_scrq) {
4000 netdev_err(adapter->netdev,
4001 "RX or TX queues are not allocated, device login failed\n");
4005 release_login_buffer(adapter);
4006 release_login_rsp_buffer(adapter);
4008 client_data_len = vnic_client_data_len(adapter);
4011 sizeof(struct ibmvnic_login_buffer) +
4012 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
4015 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
4017 goto buf_alloc_failed;
4019 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
4021 if (dma_mapping_error(dev, buffer_token)) {
4022 dev_err(dev, "Couldn't map login buffer\n");
4023 goto buf_map_failed;
4026 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
4027 sizeof(u64) * adapter->req_tx_queues +
4028 sizeof(u64) * adapter->req_rx_queues +
4029 sizeof(u64) * adapter->req_rx_queues +
4030 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
4032 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
4033 if (!login_rsp_buffer)
4034 goto buf_rsp_alloc_failed;
4036 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
4037 rsp_buffer_size, DMA_FROM_DEVICE);
4038 if (dma_mapping_error(dev, rsp_buffer_token)) {
4039 dev_err(dev, "Couldn't map login rsp buffer\n");
4040 goto buf_rsp_map_failed;
4043 adapter->login_buf = login_buffer;
4044 adapter->login_buf_token = buffer_token;
4045 adapter->login_buf_sz = buffer_size;
4046 adapter->login_rsp_buf = login_rsp_buffer;
4047 adapter->login_rsp_buf_token = rsp_buffer_token;
4048 adapter->login_rsp_buf_sz = rsp_buffer_size;
4050 login_buffer->len = cpu_to_be32(buffer_size);
4051 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
4052 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
4053 login_buffer->off_txcomp_subcrqs =
4054 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
4055 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
4056 login_buffer->off_rxcomp_subcrqs =
4057 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
4058 sizeof(u64) * adapter->req_tx_queues);
4059 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
4060 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
4062 tx_list_p = (__be64 *)((char *)login_buffer +
4063 sizeof(struct ibmvnic_login_buffer));
4064 rx_list_p = (__be64 *)((char *)login_buffer +
4065 sizeof(struct ibmvnic_login_buffer) +
4066 sizeof(u64) * adapter->req_tx_queues);
4068 for (i = 0; i < adapter->req_tx_queues; i++) {
4069 if (adapter->tx_scrq[i]) {
4071 cpu_to_be64(adapter->tx_scrq[i]->crq_num);
4075 for (i = 0; i < adapter->req_rx_queues; i++) {
4076 if (adapter->rx_scrq[i]) {
4078 cpu_to_be64(adapter->rx_scrq[i]->crq_num);
4082 /* Insert vNIC login client data */
4083 vlcd = (struct vnic_login_client_data *)
4084 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
4085 login_buffer->client_data_offset =
4086 cpu_to_be32((char *)vlcd - (char *)login_buffer);
4087 login_buffer->client_data_len = cpu_to_be32(client_data_len);
4089 vnic_add_client_data(adapter, vlcd);
4091 netdev_dbg(adapter->netdev, "Login Buffer:\n");
4092 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
4093 netdev_dbg(adapter->netdev, "%016lx\n",
4094 ((unsigned long *)(adapter->login_buf))[i]);
4097 memset(&crq, 0, sizeof(crq));
4098 crq.login.first = IBMVNIC_CRQ_CMD;
4099 crq.login.cmd = LOGIN;
4100 crq.login.ioba = cpu_to_be32(buffer_token);
4101 crq.login.len = cpu_to_be32(buffer_size);
4103 adapter->login_pending = true;
4104 rc = ibmvnic_send_crq(adapter, &crq);
4106 adapter->login_pending = false;
4107 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
4108 goto buf_rsp_map_failed;
4114 kfree(login_rsp_buffer);
4115 adapter->login_rsp_buf = NULL;
4116 buf_rsp_alloc_failed:
4117 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
4119 kfree(login_buffer);
4120 adapter->login_buf = NULL;
4125 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
4128 union ibmvnic_crq crq;
4130 memset(&crq, 0, sizeof(crq));
4131 crq.request_map.first = IBMVNIC_CRQ_CMD;
4132 crq.request_map.cmd = REQUEST_MAP;
4133 crq.request_map.map_id = map_id;
4134 crq.request_map.ioba = cpu_to_be32(addr);
4135 crq.request_map.len = cpu_to_be32(len);
4136 return ibmvnic_send_crq(adapter, &crq);
4139 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
4141 union ibmvnic_crq crq;
4143 memset(&crq, 0, sizeof(crq));
4144 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
4145 crq.request_unmap.cmd = REQUEST_UNMAP;
4146 crq.request_unmap.map_id = map_id;
4147 return ibmvnic_send_crq(adapter, &crq);
4150 static void send_query_map(struct ibmvnic_adapter *adapter)
4152 union ibmvnic_crq crq;
4154 memset(&crq, 0, sizeof(crq));
4155 crq.query_map.first = IBMVNIC_CRQ_CMD;
4156 crq.query_map.cmd = QUERY_MAP;
4157 ibmvnic_send_crq(adapter, &crq);
4160 /* Send a series of CRQs requesting various capabilities of the VNIC server */
4161 static void send_query_cap(struct ibmvnic_adapter *adapter)
4163 union ibmvnic_crq crq;
4165 atomic_set(&adapter->running_cap_crqs, 0);
4166 memset(&crq, 0, sizeof(crq));
4167 crq.query_capability.first = IBMVNIC_CRQ_CMD;
4168 crq.query_capability.cmd = QUERY_CAPABILITY;
4170 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
4171 atomic_inc(&adapter->running_cap_crqs);
4172 ibmvnic_send_crq(adapter, &crq);
4174 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
4175 atomic_inc(&adapter->running_cap_crqs);
4176 ibmvnic_send_crq(adapter, &crq);
4178 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
4179 atomic_inc(&adapter->running_cap_crqs);
4180 ibmvnic_send_crq(adapter, &crq);
4182 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
4183 atomic_inc(&adapter->running_cap_crqs);
4184 ibmvnic_send_crq(adapter, &crq);
4186 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
4187 atomic_inc(&adapter->running_cap_crqs);
4188 ibmvnic_send_crq(adapter, &crq);
4190 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
4191 atomic_inc(&adapter->running_cap_crqs);
4192 ibmvnic_send_crq(adapter, &crq);
4194 crq.query_capability.capability =
4195 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
4196 atomic_inc(&adapter->running_cap_crqs);
4197 ibmvnic_send_crq(adapter, &crq);
4199 crq.query_capability.capability =
4200 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
4201 atomic_inc(&adapter->running_cap_crqs);
4202 ibmvnic_send_crq(adapter, &crq);
4204 crq.query_capability.capability =
4205 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
4206 atomic_inc(&adapter->running_cap_crqs);
4207 ibmvnic_send_crq(adapter, &crq);
4209 crq.query_capability.capability =
4210 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
4211 atomic_inc(&adapter->running_cap_crqs);
4212 ibmvnic_send_crq(adapter, &crq);
4214 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
4215 atomic_inc(&adapter->running_cap_crqs);
4216 ibmvnic_send_crq(adapter, &crq);
4218 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
4219 atomic_inc(&adapter->running_cap_crqs);
4220 ibmvnic_send_crq(adapter, &crq);
4222 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
4223 atomic_inc(&adapter->running_cap_crqs);
4224 ibmvnic_send_crq(adapter, &crq);
4226 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
4227 atomic_inc(&adapter->running_cap_crqs);
4228 ibmvnic_send_crq(adapter, &crq);
4230 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
4231 atomic_inc(&adapter->running_cap_crqs);
4232 ibmvnic_send_crq(adapter, &crq);
4234 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
4235 atomic_inc(&adapter->running_cap_crqs);
4236 ibmvnic_send_crq(adapter, &crq);
4238 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
4239 atomic_inc(&adapter->running_cap_crqs);
4240 ibmvnic_send_crq(adapter, &crq);
4242 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
4243 atomic_inc(&adapter->running_cap_crqs);
4244 ibmvnic_send_crq(adapter, &crq);
4246 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
4247 atomic_inc(&adapter->running_cap_crqs);
4248 ibmvnic_send_crq(adapter, &crq);
4250 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
4251 atomic_inc(&adapter->running_cap_crqs);
4252 ibmvnic_send_crq(adapter, &crq);
4254 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
4255 atomic_inc(&adapter->running_cap_crqs);
4256 ibmvnic_send_crq(adapter, &crq);
4258 crq.query_capability.capability =
4259 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
4260 atomic_inc(&adapter->running_cap_crqs);
4261 ibmvnic_send_crq(adapter, &crq);
4263 crq.query_capability.capability =
4264 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
4265 atomic_inc(&adapter->running_cap_crqs);
4266 ibmvnic_send_crq(adapter, &crq);
4268 crq.query_capability.capability =
4269 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
4270 atomic_inc(&adapter->running_cap_crqs);
4271 ibmvnic_send_crq(adapter, &crq);
4273 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
4274 atomic_inc(&adapter->running_cap_crqs);
4275 ibmvnic_send_crq(adapter, &crq);
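/*
 * A table-driven sketch of the query pattern above (illustrative
 * only; the driver uses the unrolled form, and this helper is not
 * called anywhere):
 */
static void __maybe_unused example_query_caps(struct ibmvnic_adapter *adapter)
{
	static const u16 caps[] = {
		MIN_TX_QUEUES, MIN_RX_QUEUES, MAX_TX_QUEUES, MAX_RX_QUEUES,
		MIN_MTU, MAX_MTU,
	};
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	for (i = 0; i < ARRAY_SIZE(caps); i++) {
		/* one outstanding response is expected per request */
		crq.query_capability.capability = cpu_to_be16(caps[i]);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}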
4278 static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
4280 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4281 struct device *dev = &adapter->vdev->dev;
4282 union ibmvnic_crq crq;
4284 adapter->ip_offload_tok =
4286 &adapter->ip_offload_buf,
4290 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4291 if (!firmware_has_feature(FW_FEATURE_CMO))
4292 dev_err(dev, "Couldn't map offload buffer\n");
4296 memset(&crq, 0, sizeof(crq));
4297 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4298 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4299 crq.query_ip_offload.len = cpu_to_be32(buf_sz);
4300 crq.query_ip_offload.ioba =
4301 cpu_to_be32(adapter->ip_offload_tok);
4303 ibmvnic_send_crq(adapter, &crq);
4306 static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
4308 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
4309 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4310 struct device *dev = &adapter->vdev->dev;
4311 netdev_features_t old_hw_features = 0;
4312 union ibmvnic_crq crq;
4314 adapter->ip_offload_ctrl_tok =
4317 sizeof(adapter->ip_offload_ctrl),
4320 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4321 dev_err(dev, "Couldn't map ip offload control buffer\n");
4325 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4326 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
4327 ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
4328 ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
4329 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4330 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
4331 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4332 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
4333 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
4334 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
4336 /* large_rx disabled for now, additional features needed */
4337 ctrl_buf->large_rx_ipv4 = 0;
4338 ctrl_buf->large_rx_ipv6 = 0;
4340 if (adapter->state != VNIC_PROBING) {
4341 old_hw_features = adapter->netdev->hw_features;
4342 adapter->netdev->hw_features = 0;
4345 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4347 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4348 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4350 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4351 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4353 if ((adapter->netdev->features &
4354 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4355 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4357 if (buf->large_tx_ipv4)
4358 adapter->netdev->hw_features |= NETIF_F_TSO;
4359 if (buf->large_tx_ipv6)
4360 adapter->netdev->hw_features |= NETIF_F_TSO6;
4362 if (adapter->state == VNIC_PROBING) {
4363 adapter->netdev->features |= adapter->netdev->hw_features;
4364 } else if (old_hw_features != adapter->netdev->hw_features) {
4365 netdev_features_t tmp = 0;
4367 /* disable features no longer supported */
4368 adapter->netdev->features &= adapter->netdev->hw_features;
4369 /* turn on features now supported if previously enabled */
4370 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4371 adapter->netdev->hw_features;
4372 adapter->netdev->features |=
4373 tmp & adapter->netdev->wanted_features;
4376 memset(&crq, 0, sizeof(crq));
4377 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4378 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4379 crq.control_ip_offload.len =
4380 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4381 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4382 ibmvnic_send_crq(adapter, &crq);
4385 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
4386 struct ibmvnic_adapter *adapter)
4388 struct device *dev = &adapter->vdev->dev;
4390 if (crq->get_vpd_size_rsp.rc.code) {
4391 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
4392 crq->get_vpd_size_rsp.rc.code);
4393 complete(&adapter->fw_done);
4397 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
4398 complete(&adapter->fw_done);
4401 static void handle_vpd_rsp(union ibmvnic_crq *crq,
4402 struct ibmvnic_adapter *adapter)
4404 struct device *dev = &adapter->vdev->dev;
4405 unsigned char *substr = NULL;
4406 u8 fw_level_len = 0;
4408 memset(adapter->fw_version, 0, 32);
4410 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
4413 if (crq->get_vpd_rsp.rc.code) {
4414 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
4415 crq->get_vpd_rsp.rc.code);
4419 /* get the position of the firmware version info
4420 * located after the ASCII 'RM' substring in the buffer
4421 */
4422 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
4424 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
4428 /* get length of firmware level ASCII substring */
4429 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
4430 fw_level_len = *(substr + 2);
4432 dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
4436 /* copy firmware version string from vpd into adapter */
	if ((substr + 3 + fw_level_len) <
	    (adapter->vpd->buff + adapter->vpd->len)) {
		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
	} else {
		dev_info(dev, "FW substr extrapolated VPD buff\n");
	}

complete:
4445 if (adapter->fw_version[0] == '\0')
4446 strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
4447 complete(&adapter->fw_done);
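/* A minimal sketch (hypothetical helper, not driver code) of the VPD
 * keyword layout parsed above: the "RM" tag is followed by a one-byte
 * length and then the ASCII firmware level, i.e. <'R'><'M'><len><data...>.
 * The bounds checks mirror the ones in handle_vpd_rsp().
 */
static int vpd_copy_fw_level(const char *buff, size_t buff_len,
			     char *out, size_t out_len)
{
	const char *tag = strnstr(buff, "RM", buff_len);
	u8 data_len;

	if (!tag || tag + 3 > buff + buff_len)
		return -ENOENT;	/* no keyword, or truncated length byte */
	data_len = tag[2];	/* one-byte length field */
	if (tag + 3 + data_len > buff + buff_len)
		return -EINVAL;	/* data would run past the buffer */
	strscpy(out, tag + 3, min_t(size_t, data_len + 1, out_len));
	return 0;
}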
4450 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4452 struct device *dev = &adapter->vdev->dev;
4453 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4456 dma_unmap_single(dev, adapter->ip_offload_tok,
4457 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4459 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4460 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4461 netdev_dbg(adapter->netdev, "%016lx\n",
4462 ((unsigned long *)(buf))[i]);
4464 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4465 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4466 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4467 buf->tcp_ipv4_chksum);
4468 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4469 buf->tcp_ipv6_chksum);
4470 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4471 buf->udp_ipv4_chksum);
4472 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
4473 buf->udp_ipv6_chksum);
4474 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
4475 buf->large_tx_ipv4);
4476 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
4477 buf->large_tx_ipv6);
4478 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
4479 buf->large_rx_ipv4);
4480 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
4481 buf->large_rx_ipv6);
4482 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
4483 buf->max_ipv4_header_size);
4484 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
4485 buf->max_ipv6_header_size);
4486 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
4487 buf->max_tcp_header_size);
4488 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
4489 buf->max_udp_header_size);
4490 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
4491 buf->max_large_tx_size);
4492 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
4493 buf->max_large_rx_size);
4494 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
4495 buf->ipv6_extension_header);
4496 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
4497 buf->tcp_pseudosum_req);
4498 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
4499 buf->num_ipv6_ext_headers);
4500 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
4501 buf->off_ipv6_ext_headers);
4503 send_control_ip_offload(adapter);
static const char *ibmvnic_fw_err_cause(u16 cause)
{
	switch (cause) {
	case ADAPTER_PROBLEM:	return "adapter problem";
	case BUS_PROBLEM:	return "bus problem";
	case FW_PROBLEM:	return "firmware problem";
	case DD_PROBLEM:	return "device driver problem";
	case EEH_RECOVERY:	return "EEH recovery";
	case FW_UPDATED:	return "firmware updated";
	case LOW_MEMORY:	return "low memory";
	default:		return "unknown";
	}
}
4528 static void handle_error_indication(union ibmvnic_crq *crq,
4529 struct ibmvnic_adapter *adapter)
4531 struct device *dev = &adapter->vdev->dev;
4534 cause = be16_to_cpu(crq->error_indication.error_cause);
4536 dev_warn_ratelimited(dev,
4537 "Firmware reports %serror, cause: %s. Starting recovery...\n",
4538 crq->error_indication.flags
4539 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4540 ibmvnic_fw_err_cause(cause));
4542 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4543 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4545 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
4548 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4549 struct ibmvnic_adapter *adapter)
4551 struct net_device *netdev = adapter->netdev;
4552 struct device *dev = &adapter->vdev->dev;
	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		goto out;
	}
	/* crq->change_mac_addr.mac_addr is the requested one
	 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
	 */
	ether_addr_copy(netdev->dev_addr,
			&crq->change_mac_addr_rsp.mac_addr[0]);
4565 ether_addr_copy(adapter->mac_addr,
4566 &crq->change_mac_addr_rsp.mac_addr[0]);
out:
	complete(&adapter->fw_done);
	return rc;
4572 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
4573 struct ibmvnic_adapter *adapter)
4575 struct device *dev = &adapter->vdev->dev;
4579 atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
4586 req_value = &adapter->req_rx_queues;
4589 case REQ_RX_ADD_QUEUES:
4590 req_value = &adapter->req_rx_add_queues;
4593 case REQ_TX_ENTRIES_PER_SUBCRQ:
4594 req_value = &adapter->req_tx_entries_per_subcrq;
4595 name = "tx_entries_per_subcrq";
4597 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
4598 req_value = &adapter->req_rx_add_entries_per_subcrq;
4599 name = "rx_add_entries_per_subcrq";
4602 req_value = &adapter->req_mtu;
4605 case PROMISC_REQUESTED:
4606 req_value = &adapter->promisc;
4610 dev_err(dev, "Got invalid cap request rsp %d\n",
4611 crq->request_capability.capability);
	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long)be64_to_cpu(crq->request_capability_rsp.number),
			 name);
		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
		    REQ_MTU) {
			pr_err("mtu of %llu is not supported. Reverting.\n",
			       *req_value);
			*req_value = adapter->fallback.mtu;
		} else {
			*req_value = be64_to_cpu(crq->request_capability_rsp.number);
		}
4634 send_request_cap(adapter, 1);
4637 dev_err(dev, "Error %d in request cap rsp\n",
4638 crq->request_capability_rsp.rc.code);
4642 /* Done receiving requested capabilities, query IP offload support */
4643 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4644 adapter->wait_capability = false;
4645 send_query_ip_offload(adapter);
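/* A minimal sketch of the negotiation implied by the PARTIALSUCCESS path
 * above: the server answers with the value it can actually support, and
 * the driver adopts it and resends the request. For example, a request of
 * req_tx_queues=8 may come back as PARTIALSUCCESS with number=4; the
 * driver stores 4 and retries. The helper below is hypothetical, for
 * illustration only.
 */
static void ibmvnic_retry_cap(struct ibmvnic_adapter *adapter,
			      u64 *req_value, u64 server_suggested)
{
	*req_value = server_suggested;	/* adopt the supported value */
	send_request_cap(adapter, 1);	/* resend, flagged as a retry */
}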
4649 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4650 struct ibmvnic_adapter *adapter)
4652 struct device *dev = &adapter->vdev->dev;
4653 struct net_device *netdev = adapter->netdev;
4654 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4655 struct ibmvnic_login_buffer *login = adapter->login_buf;
4656 u64 *tx_handle_array;
4657 u64 *rx_handle_array;
4663 /* CHECK: Test/set of login_pending does not need to be atomic
4664 * because only ibmvnic_tasklet tests/clears this.
4666 if (!adapter->login_pending) {
4667 netdev_warn(netdev, "Ignoring unexpected login response\n");
4670 adapter->login_pending = false;
4672 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
4674 dma_unmap_single(dev, adapter->login_rsp_buf_token,
4675 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
4677 /* If the number of queues requested can't be allocated by the
4678 * server, the login response will return with code 1. We will need
4679 * to resend the login buffer with fewer queues requested.
4681 if (login_rsp_crq->generic.rc.code) {
4682 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
4683 complete(&adapter->init_done);
4687 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4689 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4690 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4691 netdev_dbg(adapter->netdev, "%016lx\n",
4692 ((unsigned long *)(adapter->login_rsp_buf))[i]);
4696 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4697 (be32_to_cpu(login->num_rxcomp_subcrqs) *
4698 adapter->req_rx_add_queues !=
4699 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4700 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
4701 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4704 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4705 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
	/* variable buffer sizes are not supported, so just read the
	 * first entry
	 */
4709 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
4711 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
4712 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4714 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4715 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
4716 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
4717 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
4719 for (i = 0; i < num_tx_pools; i++)
4720 adapter->tx_scrq[i]->handle = tx_handle_array[i];
4722 for (i = 0; i < num_rx_pools; i++)
4723 adapter->rx_scrq[i]->handle = rx_handle_array[i];
4725 adapter->num_active_tx_scrqs = num_tx_pools;
4726 adapter->num_active_rx_scrqs = num_rx_pools;
4727 release_login_rsp_buffer(adapter);
4728 release_login_buffer(adapter);
4729 complete(&adapter->init_done);
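/* The login response buffer parsed above is self-describing: fixed header
 * fields hold big-endian byte offsets from the buffer base to
 * variable-length u64 arrays (buffer sizes, TX/RX sub-CRQ handles). A
 * sketch of that offset-following idiom, with a hypothetical helper name:
 */
static inline u64 *login_rsp_array(struct ibmvnic_login_rsp_buffer *rsp,
				   __be32 off)
{
	return (u64 *)((u8 *)rsp + be32_to_cpu(off));
}

/* e.g. tx_handle_array = login_rsp_array(adapter->login_rsp_buf,
 *	adapter->login_rsp_buf->off_txsubm_subcrqs);
 */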
4734 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4735 struct ibmvnic_adapter *adapter)
4737 struct device *dev = &adapter->vdev->dev;
	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4745 static void handle_query_map_rsp(union ibmvnic_crq *crq,
4746 struct ibmvnic_adapter *adapter)
4748 struct net_device *netdev = adapter->netdev;
4749 struct device *dev = &adapter->vdev->dev;
	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
4762 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4763 struct ibmvnic_adapter *adapter)
4765 struct net_device *netdev = adapter->netdev;
4766 struct device *dev = &adapter->vdev->dev;
4769 atomic_dec(&adapter->running_cap_crqs);
4770 netdev_dbg(netdev, "Outstanding queries: %d\n",
4771 atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}
	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
4781 be64_to_cpu(crq->query_capability.number);
4782 netdev_dbg(netdev, "min_tx_queues = %lld\n",
4783 adapter->min_tx_queues);
4786 adapter->min_rx_queues =
4787 be64_to_cpu(crq->query_capability.number);
4788 netdev_dbg(netdev, "min_rx_queues = %lld\n",
4789 adapter->min_rx_queues);
4791 case MIN_RX_ADD_QUEUES:
4792 adapter->min_rx_add_queues =
4793 be64_to_cpu(crq->query_capability.number);
4794 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4795 adapter->min_rx_add_queues);
4798 adapter->max_tx_queues =
4799 be64_to_cpu(crq->query_capability.number);
4800 netdev_dbg(netdev, "max_tx_queues = %lld\n",
4801 adapter->max_tx_queues);
4804 adapter->max_rx_queues =
4805 be64_to_cpu(crq->query_capability.number);
4806 netdev_dbg(netdev, "max_rx_queues = %lld\n",
4807 adapter->max_rx_queues);
4809 case MAX_RX_ADD_QUEUES:
4810 adapter->max_rx_add_queues =
4811 be64_to_cpu(crq->query_capability.number);
4812 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4813 adapter->max_rx_add_queues);
4815 case MIN_TX_ENTRIES_PER_SUBCRQ:
4816 adapter->min_tx_entries_per_subcrq =
4817 be64_to_cpu(crq->query_capability.number);
4818 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4819 adapter->min_tx_entries_per_subcrq);
4821 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4822 adapter->min_rx_add_entries_per_subcrq =
4823 be64_to_cpu(crq->query_capability.number);
4824 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4825 adapter->min_rx_add_entries_per_subcrq);
4827 case MAX_TX_ENTRIES_PER_SUBCRQ:
4828 adapter->max_tx_entries_per_subcrq =
4829 be64_to_cpu(crq->query_capability.number);
4830 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4831 adapter->max_tx_entries_per_subcrq);
4833 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4834 adapter->max_rx_add_entries_per_subcrq =
4835 be64_to_cpu(crq->query_capability.number);
4836 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4837 adapter->max_rx_add_entries_per_subcrq);
4839 case TCP_IP_OFFLOAD:
4840 adapter->tcp_ip_offload =
4841 be64_to_cpu(crq->query_capability.number);
4842 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4843 adapter->tcp_ip_offload);
4845 case PROMISC_SUPPORTED:
4846 adapter->promisc_supported =
4847 be64_to_cpu(crq->query_capability.number);
4848 netdev_dbg(netdev, "promisc_supported = %lld\n",
4849 adapter->promisc_supported);
4852 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
4853 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4854 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4857 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
4858 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4859 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4861 case MAX_MULTICAST_FILTERS:
4862 adapter->max_multicast_filters =
4863 be64_to_cpu(crq->query_capability.number);
4864 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4865 adapter->max_multicast_filters);
4867 case VLAN_HEADER_INSERTION:
4868 adapter->vlan_header_insertion =
4869 be64_to_cpu(crq->query_capability.number);
4870 if (adapter->vlan_header_insertion)
4871 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4872 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4873 adapter->vlan_header_insertion);
4875 case RX_VLAN_HEADER_INSERTION:
4876 adapter->rx_vlan_header_insertion =
4877 be64_to_cpu(crq->query_capability.number);
4878 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4879 adapter->rx_vlan_header_insertion);
4881 case MAX_TX_SG_ENTRIES:
4882 adapter->max_tx_sg_entries =
4883 be64_to_cpu(crq->query_capability.number);
4884 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4885 adapter->max_tx_sg_entries);
4887 case RX_SG_SUPPORTED:
4888 adapter->rx_sg_supported =
4889 be64_to_cpu(crq->query_capability.number);
4890 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4891 adapter->rx_sg_supported);
4893 case OPT_TX_COMP_SUB_QUEUES:
4894 adapter->opt_tx_comp_sub_queues =
4895 be64_to_cpu(crq->query_capability.number);
4896 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4897 adapter->opt_tx_comp_sub_queues);
4899 case OPT_RX_COMP_QUEUES:
4900 adapter->opt_rx_comp_queues =
4901 be64_to_cpu(crq->query_capability.number);
4902 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4903 adapter->opt_rx_comp_queues);
4905 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4906 adapter->opt_rx_bufadd_q_per_rx_comp_q =
4907 be64_to_cpu(crq->query_capability.number);
4908 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4909 adapter->opt_rx_bufadd_q_per_rx_comp_q);
4911 case OPT_TX_ENTRIES_PER_SUBCRQ:
4912 adapter->opt_tx_entries_per_subcrq =
4913 be64_to_cpu(crq->query_capability.number);
4914 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4915 adapter->opt_tx_entries_per_subcrq);
4917 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4918 adapter->opt_rxba_entries_per_subcrq =
4919 be64_to_cpu(crq->query_capability.number);
4920 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4921 adapter->opt_rxba_entries_per_subcrq);
4923 case TX_RX_DESC_REQ:
4924 adapter->tx_rx_desc_req = crq->query_capability.number;
4925 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4926 adapter->tx_rx_desc_req);
4930 netdev_err(netdev, "Got invalid cap rsp %d\n",
4931 crq->query_capability.capability);
out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
4936 adapter->wait_capability = false;
4937 send_request_cap(adapter, 0);
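/* Both capability handlers above use the same phase-advance pattern: each
 * response decrements running_cap_crqs, and whichever response drives the
 * counter to zero kicks off the next initialization step. A compact
 * equivalent (hypothetical helper) using atomic_dec_and_test():
 */
static void ibmvnic_cap_rsp_done(struct ibmvnic_adapter *adapter,
				 void (*next_step)(struct ibmvnic_adapter *))
{
	/* the last outstanding response advances the init state machine */
	if (atomic_dec_and_test(&adapter->running_cap_crqs))
		next_step(adapter);
}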
4941 static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
4943 union ibmvnic_crq crq;
4946 memset(&crq, 0, sizeof(crq));
4947 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
4948 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
4950 mutex_lock(&adapter->fw_lock);
4951 adapter->fw_done_rc = 0;
4952 reinit_completion(&adapter->fw_done);
4954 rc = ibmvnic_send_crq(adapter, &crq);
4956 mutex_unlock(&adapter->fw_lock);
4960 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
4962 mutex_unlock(&adapter->fw_lock);
4966 mutex_unlock(&adapter->fw_lock);
4967 return adapter->fw_done_rc ? -EIO : 0;
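/* send_query_phys_parms() above shows the driver's synchronous-command
 * pattern: serialize on fw_lock, re-arm the fw_done completion, send the
 * CRQ, then wait with a timeout. Factored out as a sketch (hypothetical
 * wrapper; the 10000 ms timeout matches the wait above):
 */
static int ibmvnic_send_crq_sync(struct ibmvnic_adapter *adapter,
				 union ibmvnic_crq *crq)
{
	int rc;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, crq);
	if (!rc)
		rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done,
						 10000);
	mutex_unlock(&adapter->fw_lock);

	return rc ? rc : (adapter->fw_done_rc ? -EIO : 0);
}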
4970 static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
4971 struct ibmvnic_adapter *adapter)
4973 struct net_device *netdev = adapter->netdev;
4975 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
	rc = crq->query_phys_parms_rsp.rc.code;
	if (rc) {
		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
		return rc;
	}
	switch (rspeed) {
	case IBMVNIC_10MBPS:
		adapter->speed = SPEED_10;
4986 case IBMVNIC_100MBPS:
4987 adapter->speed = SPEED_100;
4990 adapter->speed = SPEED_1000;
4992 case IBMVNIC_10GBPS:
4993 adapter->speed = SPEED_10000;
4995 case IBMVNIC_25GBPS:
4996 adapter->speed = SPEED_25000;
4998 case IBMVNIC_40GBPS:
4999 adapter->speed = SPEED_40000;
5001 case IBMVNIC_50GBPS:
5002 adapter->speed = SPEED_50000;
5004 case IBMVNIC_100GBPS:
5005 adapter->speed = SPEED_100000;
5007 case IBMVNIC_200GBPS:
5008 adapter->speed = SPEED_200000;
5011 if (netif_carrier_ok(netdev))
5012 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
5013 adapter->speed = SPEED_UNKNOWN;
5015 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
5016 adapter->duplex = DUPLEX_FULL;
5017 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
5018 adapter->duplex = DUPLEX_HALF;
5020 adapter->duplex = DUPLEX_UNKNOWN;
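/* The speed switch above, restated as a lookup table for illustration
 * only. The case label for SPEED_1000 is elided in this listing;
 * IBMVNIC_1GBPS is assumed from the naming pattern of the other
 * constants.
 */
static const struct {
	u32 fw_speed;		/* IBMVNIC_* value from the CRQ response */
	u32 ethtool_speed;	/* corresponding ethtool SPEED_* value */
} ibmvnic_speed_map[] = {
	{ IBMVNIC_10MBPS,	SPEED_10	},
	{ IBMVNIC_100MBPS,	SPEED_100	},
	{ IBMVNIC_1GBPS,	SPEED_1000	},
	{ IBMVNIC_10GBPS,	SPEED_10000	},
	{ IBMVNIC_25GBPS,	SPEED_25000	},
	{ IBMVNIC_40GBPS,	SPEED_40000	},
	{ IBMVNIC_50GBPS,	SPEED_50000	},
	{ IBMVNIC_100GBPS,	SPEED_100000	},
	{ IBMVNIC_200GBPS,	SPEED_200000	},
};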
5025 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
5026 struct ibmvnic_adapter *adapter)
5028 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
5029 struct net_device *netdev = adapter->netdev;
5030 struct device *dev = &adapter->vdev->dev;
5031 u64 *u64_crq = (u64 *)crq;
5034 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
5035 (unsigned long)cpu_to_be64(u64_crq[0]),
5036 (unsigned long)cpu_to_be64(u64_crq[1]));
5037 switch (gen_crq->first) {
5038 case IBMVNIC_CRQ_INIT_RSP:
5039 switch (gen_crq->cmd) {
5040 case IBMVNIC_CRQ_INIT:
5041 dev_info(dev, "Partner initialized\n");
5042 adapter->from_passive_init = true;
5043 /* Discard any stale login responses from prev reset.
5044 * CHECK: should we clear even on INIT_COMPLETE?
5046 adapter->login_pending = false;
5048 if (!completion_done(&adapter->init_done)) {
5049 complete(&adapter->init_done);
5050 adapter->init_done_rc = -EIO;
5053 if (adapter->state == VNIC_DOWN)
5054 rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
			else
				rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
5058 if (rc && rc != -EBUSY) {
5059 /* We were unable to schedule the failover
5060 * reset either because the adapter was still
5061 * probing (eg: during kexec) or we could not
5062 * allocate memory. Clear the failover_pending
5063 * flag since no one else will. We ignore
5064 * EBUSY because it means either FAILOVER reset
			 * is already scheduled or the adapter is busy
			 * with another reset.
			 */
			dev_err(dev, "Error %ld scheduling failover reset\n",
				rc);
			adapter->failover_pending = false;
5074 case IBMVNIC_CRQ_INIT_COMPLETE:
5075 dev_info(dev, "Partner initialization complete\n");
5076 adapter->crq.active = true;
5077 send_version_xchg(adapter);
5080 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
5083 case IBMVNIC_CRQ_XPORT_EVENT:
5084 netif_carrier_off(netdev);
5085 adapter->crq.active = false;
5086 /* terminate any thread waiting for a response
5089 if (!completion_done(&adapter->fw_done)) {
5090 adapter->fw_done_rc = -EIO;
5091 complete(&adapter->fw_done);
5093 if (!completion_done(&adapter->stats_done))
5094 complete(&adapter->stats_done);
5095 if (test_bit(0, &adapter->resetting))
5096 adapter->force_reset_recovery = true;
5097 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
5098 dev_info(dev, "Migrated, re-enabling adapter\n");
5099 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
5100 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
5101 dev_info(dev, "Backing device failover detected\n");
5102 adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
5107 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
5110 case IBMVNIC_CRQ_CMD_RSP:
5113 dev_err(dev, "Got an invalid msg type 0x%02x\n",
5118 switch (gen_crq->cmd) {
5119 case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		ibmvnic_version =
			be16_to_cpu(crq->version_exchange_rsp.version);
		dev_info(dev, "Partner protocol version is %d\n",
			 ibmvnic_version);
		send_query_cap(adapter);
5131 case QUERY_CAPABILITY_RSP:
5132 handle_query_cap_rsp(crq, adapter);
5135 handle_query_map_rsp(crq, adapter);
5137 case REQUEST_MAP_RSP:
5138 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
5139 complete(&adapter->fw_done);
5141 case REQUEST_UNMAP_RSP:
5142 handle_request_unmap_rsp(crq, adapter);
5144 case REQUEST_CAPABILITY_RSP:
5145 handle_request_cap_rsp(crq, adapter);
5148 netdev_dbg(netdev, "Got Login Response\n");
5149 handle_login_rsp(crq, adapter);
5151 case LOGICAL_LINK_STATE_RSP:
5153 "Got Logical Link State Response, state: %d rc: %d\n",
5154 crq->logical_link_state_rsp.link_state,
5155 crq->logical_link_state_rsp.rc.code);
5156 adapter->logical_link_state =
5157 crq->logical_link_state_rsp.link_state;
5158 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
5159 complete(&adapter->init_done);
5161 case LINK_STATE_INDICATION:
5162 netdev_dbg(netdev, "Got Logical Link State Indication\n");
5163 adapter->phys_link_state =
5164 crq->link_state_indication.phys_link_state;
5165 adapter->logical_link_state =
5166 crq->link_state_indication.logical_link_state;
5167 if (adapter->phys_link_state && adapter->logical_link_state)
5168 netif_carrier_on(netdev);
5170 netif_carrier_off(netdev);
5172 case CHANGE_MAC_ADDR_RSP:
5173 netdev_dbg(netdev, "Got MAC address change Response\n");
5174 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
5176 case ERROR_INDICATION:
5177 netdev_dbg(netdev, "Got Error Indication\n");
5178 handle_error_indication(crq, adapter);
5180 case REQUEST_STATISTICS_RSP:
5181 netdev_dbg(netdev, "Got Statistics Response\n");
5182 complete(&adapter->stats_done);
5184 case QUERY_IP_OFFLOAD_RSP:
5185 netdev_dbg(netdev, "Got Query IP offload Response\n");
5186 handle_query_ip_offload_rsp(adapter);
5188 case MULTICAST_CTRL_RSP:
5189 netdev_dbg(netdev, "Got multicast control Response\n");
5191 case CONTROL_IP_OFFLOAD_RSP:
5192 netdev_dbg(netdev, "Got Control IP offload Response\n");
5193 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
5196 complete(&adapter->init_done);
5198 case COLLECT_FW_TRACE_RSP:
5199 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
5200 complete(&adapter->fw_done);
5202 case GET_VPD_SIZE_RSP:
5203 handle_vpd_size_rsp(crq, adapter);
5206 handle_vpd_rsp(crq, adapter);
5208 case QUERY_PHYS_PARMS_RSP:
5209 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
5210 complete(&adapter->fw_done);
5213 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
5218 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
5220 struct ibmvnic_adapter *adapter = instance;
	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
5226 static void ibmvnic_tasklet(struct tasklet_struct *t)
5228 struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
5229 struct ibmvnic_crq_queue *queue = &adapter->crq;
5230 union ibmvnic_crq *crq;
5231 unsigned long flags;
5234 spin_lock_irqsave(&queue->lock, flags);
5236 /* Pull all the valid messages off the CRQ */
5237 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
5238 /* This barrier makes sure ibmvnic_next_crq()'s
5239 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
5240 * before ibmvnic_handle_crq()'s
5241 * switch(gen_crq->first) and switch(gen_crq->cmd).
		dma_rmb();
		ibmvnic_handle_crq(crq, adapter);
5245 crq->generic.first = 0;
5248 /* remain in tasklet until all
5249 * capabilities responses are received
5251 if (!adapter->wait_capability)
5254 /* if capabilities CRQ's were sent in this tasklet, the following
5255 * tasklet must wait until all responses are received
5257 if (atomic_read(&adapter->running_cap_crqs) != 0)
5258 adapter->wait_capability = true;
5259 spin_unlock_irqrestore(&queue->lock, flags);
5262 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
5264 struct vio_dev *vdev = adapter->vdev;
5268 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
5269 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
5272 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
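/* The do/while loops here and in ibmvnic_reset_crq() simply reissue the
 * hcall while the hypervisor reports busy. A variant that also honors the
 * long-busy hints by sleeping first (a sketch, not the driver's code):
 */
static long ibmvnic_hcall_busy_retry(unsigned long opcode,
				     unsigned long unit_address)
{
	long rc;

	do {
		rc = plpar_hcall_norets(opcode, unit_address);
		if (H_IS_LONG_BUSY(rc))
			msleep(get_longbusy_msecs(rc));
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	return rc;
}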
5277 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
5279 struct ibmvnic_crq_queue *crq = &adapter->crq;
5280 struct device *dev = &adapter->vdev->dev;
5281 struct vio_dev *vdev = adapter->vdev;
5286 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5287 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5289 /* Clean out the queue */
5293 memset(crq->msgs, 0, PAGE_SIZE);
5295 crq->active = false;
5297 /* And re-open it again */
5298 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5299 crq->msg_token, PAGE_SIZE);
	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
5310 static void release_crq_queue(struct ibmvnic_adapter *adapter)
5312 struct ibmvnic_crq_queue *crq = &adapter->crq;
5313 struct vio_dev *vdev = adapter->vdev;
5319 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
5320 free_irq(vdev->irq, adapter);
5321 tasklet_kill(&adapter->tasklet);
5323 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5324 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5326 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
5328 free_page((unsigned long)crq->msgs);
5330 crq->active = false;
5333 static int init_crq_queue(struct ibmvnic_adapter *adapter)
5335 struct ibmvnic_crq_queue *crq = &adapter->crq;
5336 struct device *dev = &adapter->vdev->dev;
5337 struct vio_dev *vdev = adapter->vdev;
5338 int rc, retrc = -ENOMEM;
5343 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
5344 /* Should we allocate more than one page? */
5349 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
5350 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
5352 if (dma_mapping_error(dev, crq->msg_token))
5355 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5356 crq->msg_token, PAGE_SIZE);
5358 if (rc == H_RESOURCE)
5359 /* maybe kexecing and resource is busy. try a reset */
5360 rc = ibmvnic_reset_crq(adapter);
5363 if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}
	tasklet_setup(&adapter->tasklet, ibmvnic_tasklet);
5374 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
5375 snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
5376 adapter->vdev->unit_address);
5377 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
5379 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
5381 goto req_irq_failed;
5384 rc = vio_enable_interrupts(vdev);
5386 dev_err(dev, "Error %d enabling interrupts\n", rc);
5387 goto req_irq_failed;
5391 spin_lock_init(&crq->lock);
5396 tasklet_kill(&adapter->tasklet);
5398 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5399 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5401 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
5403 free_page((unsigned long)crq->msgs);
5408 static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
5410 struct device *dev = &adapter->vdev->dev;
5411 unsigned long timeout = msecs_to_jiffies(20000);
5412 u64 old_num_rx_queues = adapter->req_rx_queues;
5413 u64 old_num_tx_queues = adapter->req_tx_queues;
5416 adapter->from_passive_init = false;
5419 reinit_completion(&adapter->init_done);
5421 adapter->init_done_rc = 0;
5422 rc = ibmvnic_send_crq_init(adapter);
5424 dev_err(dev, "Send crq init failed with error %d\n", rc);
5428 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
5429 dev_err(dev, "Initialization sequence timed out\n");
5433 if (adapter->init_done_rc) {
5434 release_crq_queue(adapter);
5435 return adapter->init_done_rc;
5438 if (adapter->from_passive_init) {
5439 adapter->state = VNIC_OPEN;
5440 adapter->from_passive_init = false;
	if (reset &&
	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
5446 adapter->reset_reason != VNIC_RESET_MOBILITY) {
5447 if (adapter->req_rx_queues != old_num_rx_queues ||
5448 adapter->req_tx_queues != old_num_tx_queues) {
5449 release_sub_crqs(adapter, 0);
5450 rc = init_sub_crqs(adapter);
5452 rc = reset_sub_crq_queues(adapter);
5455 rc = init_sub_crqs(adapter);
5459 dev_err(dev, "Initialization of sub crqs failed\n");
5460 release_crq_queue(adapter);
5464 rc = init_sub_crq_irqs(adapter);
5466 dev_err(dev, "Failed to initialize sub crq irqs\n");
5467 release_crq_queue(adapter);
5473 static struct device_attribute dev_attr_failover;
5475 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
5477 struct ibmvnic_adapter *adapter;
5478 struct net_device *netdev;
5479 unsigned char *mac_addr_p;
5483 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}
	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;
5500 adapter = netdev_priv(netdev);
5501 adapter->state = VNIC_PROBING;
5502 dev_set_drvdata(&dev->dev, netdev);
5503 adapter->vdev = dev;
5504 adapter->netdev = netdev;
5505 adapter->login_pending = false;
5507 ether_addr_copy(adapter->mac_addr, mac_addr_p);
5508 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
5509 netdev->irq = dev->irq;
5510 netdev->netdev_ops = &ibmvnic_netdev_ops;
5511 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
5512 SET_NETDEV_DEV(netdev, &dev->dev);
5514 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
5515 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
5516 __ibmvnic_delayed_reset);
5517 INIT_LIST_HEAD(&adapter->rwi_list);
5518 spin_lock_init(&adapter->rwi_lock);
5519 spin_lock_init(&adapter->state_lock);
5520 mutex_init(&adapter->fw_lock);
5521 init_completion(&adapter->init_done);
5522 init_completion(&adapter->fw_done);
5523 init_completion(&adapter->reset_done);
5524 init_completion(&adapter->stats_done);
5525 clear_bit(0, &adapter->resetting);
5527 init_success = false;
5529 rc = init_crq_queue(adapter);
5531 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
5533 goto ibmvnic_init_fail;
5536 rc = ibmvnic_reset_init(adapter, false);
	} while (rc == -EAGAIN);
5539 /* We are ignoring the error from ibmvnic_reset_init() assuming that the
5540 * partner is not ready. CRQ is not active. When the partner becomes
5541 * ready, we will do the passive init reset.
	 */
	if (!rc)
		init_success = true;
5547 rc = init_stats_buffers(adapter);
5549 goto ibmvnic_init_fail;
5551 rc = init_stats_token(adapter);
5553 goto ibmvnic_stats_fail;
5555 rc = device_create_file(&dev->dev, &dev_attr_failover);
5557 goto ibmvnic_dev_file_err;
5559 netif_carrier_off(netdev);
5560 rc = register_netdev(netdev);
5562 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
5563 goto ibmvnic_register_fail;
5565 dev_info(&dev->dev, "ibmvnic registered\n");
	if (init_success) {
		adapter->state = VNIC_PROBED;
		netdev->mtu = adapter->req_mtu - ETH_HLEN;
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
	} else {
		adapter->state = VNIC_DOWN;
	}
5576 adapter->wait_for_reset = false;
5577 adapter->last_reset_time = jiffies;
5580 ibmvnic_register_fail:
5581 device_remove_file(&dev->dev, &dev_attr_failover);
5583 ibmvnic_dev_file_err:
5584 release_stats_token(adapter);
5587 release_stats_buffers(adapter);
5590 release_sub_crqs(adapter, 1);
5591 release_crq_queue(adapter);
5592 mutex_destroy(&adapter->fw_lock);
5593 free_netdev(netdev);
5598 static void ibmvnic_remove(struct vio_dev *dev)
5600 struct net_device *netdev = dev_get_drvdata(&dev->dev);
5601 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5602 unsigned long flags;
5604 spin_lock_irqsave(&adapter->state_lock, flags);
5606 /* If ibmvnic_reset() is scheduling a reset, wait for it to
5607 * finish. Then, set the state to REMOVING to prevent it from
5608 * scheduling any more work and to have reset functions ignore
5609 * any resets that have already been scheduled. Drop the lock
5610 * after setting state, so __ibmvnic_reset() which is called
5611 * from the flush_work() below, can make progress.
5613 spin_lock(&adapter->rwi_lock);
5614 adapter->state = VNIC_REMOVING;
5615 spin_unlock(&adapter->rwi_lock);
5617 spin_unlock_irqrestore(&adapter->state_lock, flags);
5619 flush_work(&adapter->ibmvnic_reset);
5620 flush_delayed_work(&adapter->ibmvnic_delayed_reset);
5623 unregister_netdevice(netdev);
5625 release_resources(adapter);
5626 release_sub_crqs(adapter, 1);
5627 release_crq_queue(adapter);
5629 release_stats_token(adapter);
5630 release_stats_buffers(adapter);
5632 adapter->state = VNIC_REMOVED;
5635 mutex_destroy(&adapter->fw_lock);
5636 device_remove_file(&dev->dev, &dev_attr_failover);
5637 free_netdev(netdev);
5638 dev_set_drvdata(&dev->dev, NULL);
5641 static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
5642 const char *buf, size_t count)
5644 struct net_device *netdev = dev_get_drvdata(dev);
5645 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5646 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
5647 __be64 session_token;
	if (!sysfs_streq(buf, "1"))
		return -EINVAL;
5653 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
5654 H_GET_SESSION_TOKEN, 0, 0, 0);
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
5661 session_token = (__be64)retbuf[0];
5662 netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
5663 be64_to_cpu(session_token));
5664 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
5665 H_SESSION_ERR_DETECTED, session_token, 0, 0);
5668 "H_VIOCTL initiated failover failed, rc %ld\n",
5672 netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
5673 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
5677 static DEVICE_ATTR_WO(failover);
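/* The attribute above is written from user space to force a failover,
 * e.g. "echo 1 > /sys/bus/vio/devices/<unit>/failover" (path
 * illustrative). The two-step H_VIOCTL handshake it performs, as a
 * stand-alone sketch with a hypothetical helper name:
 */
static long ibmvnic_request_failover(struct vio_dev *vdev)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	/* step 1: obtain the session token identifying this connection */
	rc = plpar_hcall(H_VIOCTL, retbuf, vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc)
		return rc;

	/* step 2: report a session error against the token so the
	 * hypervisor initiates failover to the backup path
	 */
	return plpar_hcall_norets(H_VIOCTL, vdev->unit_address,
				  H_SESSION_ERR_DETECTED, retbuf[0], 0, 0);
}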
5679 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
5681 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
5682 struct ibmvnic_adapter *adapter;
5683 struct iommu_table *tbl;
5684 unsigned long ret = 0;
5687 tbl = get_iommu_table_base(&vdev->dev);
	/* netdev inits at probe time along with the structures we need below */
5691 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
5693 adapter = netdev_priv(netdev);
5695 ret += PAGE_SIZE; /* the crq message queue */
5696 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
5698 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
5699 ret += 4 * PAGE_SIZE; /* the scrq message queue */
5701 for (i = 0; i < adapter->num_active_rx_pools; i++)
5702 ret += adapter->rx_pool[i].size *
5703 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
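/* Worked example of the entitlement sum above, with hypothetical numbers
 * and assuming both PAGE_SIZE and the IOMMU page size are 4 KB: 4 TX + 4
 * RX queues and one RX pool of 256 buffers of 8 KB give roughly
 *
 *   1 page                (CRQ message queue)
 * + 1 page                (statistics buffer)
 * + (4 + 4) * 4 pages     (sub-CRQ message queues)
 * + 256 * 2 pages         (RX pool buffers)
 * = 546 pages, about 2.2 MB of requested DMA entitlement.
 */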
5708 static int ibmvnic_resume(struct device *dev)
5710 struct net_device *netdev = dev_get_drvdata(dev);
5711 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	if (adapter->state != VNIC_OPEN)
		return 0;
5716 tasklet_schedule(&adapter->tasklet);
5721 static const struct vio_device_id ibmvnic_device_table[] = {
5722 {"network", "IBM,vnic"},
5725 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
5727 static const struct dev_pm_ops ibmvnic_pm_ops = {
5728 .resume = ibmvnic_resume
5731 static struct vio_driver ibmvnic_driver = {
5732 .id_table = ibmvnic_device_table,
5733 .probe = ibmvnic_probe,
5734 .remove = ibmvnic_remove,
5735 .get_desired_dma = ibmvnic_get_desired_dma,
5736 .name = ibmvnic_driver_name,
5737 .pm = &ibmvnic_pm_ops,
5740 /* module functions */
5741 static int __init ibmvnic_module_init(void)
5743 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
5744 IBMVNIC_DRIVER_VERSION);
5746 return vio_register_driver(&ibmvnic_driver);
5749 static void __exit ibmvnic_module_exit(void)
5751 vio_unregister_driver(&ibmvnic_driver);
5754 module_init(ibmvnic_module_init);
5755 module_exit(ibmvnic_module_exit);