drivers/net/ethernet/ibm/ibmvnic.c
1 /**************************************************************************/
2 /*                                                                        */
3 /*  IBM System i and System p Virtual NIC Device Driver                   */
4 /*  Copyright (C) 2014 IBM Corp.                                          */
5 /*  Santiago Leon (santi_leon@yahoo.com)                                  */
6 /*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
7 /*  John Allen (jallen@linux.vnet.ibm.com)                                */
8 /*                                                                        */
9 /*  This program is free software; you can redistribute it and/or modify  */
10 /*  it under the terms of the GNU General Public License as published by  */
11 /*  the Free Software Foundation; either version 2 of the License, or     */
12 /*  (at your option) any later version.                                   */
13 /*                                                                        */
14 /*  This program is distributed in the hope that it will be useful,       */
15 /*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
16 /*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
17 /*  GNU General Public License for more details.                          */
18 /*                                                                        */
19 /*  You should have received a copy of the GNU General Public License     */
20 /*  along with this program.                                              */
21 /*                                                                        */
22 /* This module contains the implementation of a virtual ethernet device   */
23 /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
24 /* option of the RS/6000 Platform Architecture to interface with virtual  */
25 /* ethernet NICs that are presented to the partition by the hypervisor.   */
26 /*                                                                        */
27 /* Messages are passed between the VNIC driver and the VNIC server using  */
28 /* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
29 /* issue and receive commands that initiate communication with the server */
30 /* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
31 /* are used by the driver to notify the server that a packet is           */
32 /* ready for transmission or that a buffer has been added to receive a    */
33 /* packet. Subsequently, sCRQs are used by the server to notify the       */
34 /* driver that a packet transmission has been completed or that a packet  */
35 /* has been received and placed in a waiting buffer.                      */
36 /*                                                                        */
37 /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
38 /* which skbs are DMA mapped and immediately unmapped when the transmit   */
39 /* or receive has been completed, the VNIC driver is required to use      */
40 /*  "long term mapping". This entails that large, contiguous DMA mapped    */
41 /* buffers are allocated on driver initialization and these buffers are   */
42 /* then continuously reused to pass skbs to and from the VNIC server.     */
43 /*                                                                        */
44 /**************************************************************************/
45
46 #include <linux/module.h>
47 #include <linux/moduleparam.h>
48 #include <linux/types.h>
49 #include <linux/errno.h>
50 #include <linux/completion.h>
51 #include <linux/ioport.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/kernel.h>
54 #include <linux/netdevice.h>
55 #include <linux/etherdevice.h>
56 #include <linux/skbuff.h>
57 #include <linux/init.h>
58 #include <linux/delay.h>
59 #include <linux/mm.h>
60 #include <linux/ethtool.h>
61 #include <linux/proc_fs.h>
62 #include <linux/if_arp.h>
63 #include <linux/in.h>
64 #include <linux/ip.h>
65 #include <linux/ipv6.h>
66 #include <linux/irq.h>
67 #include <linux/kthread.h>
68 #include <linux/seq_file.h>
69 #include <linux/interrupt.h>
70 #include <net/net_namespace.h>
71 #include <asm/hvcall.h>
72 #include <linux/atomic.h>
73 #include <asm/vio.h>
74 #include <asm/iommu.h>
75 #include <linux/uaccess.h>
76 #include <asm/firmware.h>
77 #include <linux/workqueue.h>
78 #include <linux/if_vlan.h>
79 #include <linux/utsname.h>
80
81 #include "ibmvnic.h"
82
83 static const char ibmvnic_driver_name[] = "ibmvnic";
84 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
85
86 MODULE_AUTHOR("Santiago Leon");
87 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
88 MODULE_LICENSE("GPL");
89 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
90
91 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
92 static int ibmvnic_remove(struct vio_dev *);
93 static void release_sub_crqs(struct ibmvnic_adapter *, bool);
94 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
95 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
96 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
97 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
98 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
99                        union sub_crq *sub_crq);
100 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
101 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
102 static int enable_scrq_irq(struct ibmvnic_adapter *,
103                            struct ibmvnic_sub_crq_queue *);
104 static int disable_scrq_irq(struct ibmvnic_adapter *,
105                             struct ibmvnic_sub_crq_queue *);
106 static int pending_scrq(struct ibmvnic_adapter *,
107                         struct ibmvnic_sub_crq_queue *);
108 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
109                                         struct ibmvnic_sub_crq_queue *);
110 static int ibmvnic_poll(struct napi_struct *napi, int data);
111 static void send_map_query(struct ibmvnic_adapter *adapter);
112 static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
113 static void send_request_unmap(struct ibmvnic_adapter *, u8);
114 static int send_login(struct ibmvnic_adapter *adapter);
115 static void send_cap_queries(struct ibmvnic_adapter *adapter);
116 static int init_sub_crqs(struct ibmvnic_adapter *);
117 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
118 static int ibmvnic_init(struct ibmvnic_adapter *);
119 static void release_crq_queue(struct ibmvnic_adapter *);
120 static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
121
122 struct ibmvnic_stat {
123         char name[ETH_GSTRING_LEN];
124         int offset;
125 };
126
127 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
128                              offsetof(struct ibmvnic_statistics, stat))
129 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
130
131 static const struct ibmvnic_stat ibmvnic_stats[] = {
132         {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
133         {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
134         {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
135         {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
136         {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
137         {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
138         {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
139         {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
140         {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
141         {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
142         {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
143         {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
144         {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
145         {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
146         {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
147         {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
148         {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
149         {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
150         {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
151         {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
152         {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
153         {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
154 };
155
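/* Thin wrapper around the H_REG_SUB_CRQ hypercall: registers a sub-CRQ page
 * with the hypervisor and returns the assigned queue number and irq through
 * the caller-supplied pointers.
 */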
156 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
157                           unsigned long length, unsigned long *number,
158                           unsigned long *irq)
159 {
160         unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
161         long rc;
162
163         rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
164         *number = retbuf[0];
165         *irq = retbuf[1];
166
167         return rc;
168 }
169
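/* Allocate a "long term" buffer: a DMA-coherent region registered with the
 * VNIC server via a REQUEST_MAP command.  The caller sleeps on
 * adapter->fw_done until the server acknowledges the mapping.
 */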
170 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
171                                 struct ibmvnic_long_term_buff *ltb, int size)
172 {
173         struct device *dev = &adapter->vdev->dev;
174
175         ltb->size = size;
176         ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
177                                        GFP_KERNEL);
178
179         if (!ltb->buff) {
180                 dev_err(dev, "Couldn't alloc long term buffer\n");
181                 return -ENOMEM;
182         }
183         ltb->map_id = adapter->map_id;
184         adapter->map_id++;
185
186         init_completion(&adapter->fw_done);
187         send_request_map(adapter, ltb->addr,
188                          ltb->size, ltb->map_id);
189         wait_for_completion(&adapter->fw_done);
190
191         if (adapter->fw_done_rc) {
192                 dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
193                         adapter->fw_done_rc);
194                 return -1;
195         }
196         return 0;
197 }
198
199 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
200                                 struct ibmvnic_long_term_buff *ltb)
201 {
202         struct device *dev = &adapter->vdev->dev;
203
204         if (!ltb->buff)
205                 return;
206
207         if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
208             adapter->reset_reason != VNIC_RESET_MOBILITY)
209                 send_request_unmap(adapter, ltb->map_id);
210         dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
211 }
212
213 static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
214                                 struct ibmvnic_long_term_buff *ltb)
215 {
216         memset(ltb->buff, 0, ltb->size);
217
218         init_completion(&adapter->fw_done);
219         send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
220         wait_for_completion(&adapter->fw_done);
221
222         if (adapter->fw_done_rc) {
223                 dev_info(&adapter->vdev->dev,
224                          "Reset failed, attempting to free and reallocate buffer\n");
225                 free_long_term_buff(adapter, ltb);
226                 return alloc_long_term_buff(adapter, ltb, ltb->size);
227         }
228         return 0;
229 }
230
231 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
232 {
233         int i;
234
235         for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
236              i++)
237                 adapter->rx_pool[i].active = 0;
238 }
239
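/* Refill an rx pool: pair each free slot with a freshly allocated skb, point
 * the slot at its region of the pool's long term buffer, and post an rx_add
 * sub-CRQ descriptor so the server can place incoming frames there.
 */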
240 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
241                               struct ibmvnic_rx_pool *pool)
242 {
243         int count = pool->size - atomic_read(&pool->available);
244         struct device *dev = &adapter->vdev->dev;
245         int buffers_added = 0;
246         unsigned long lpar_rc;
247         union sub_crq sub_crq;
248         struct sk_buff *skb;
249         unsigned int offset;
250         dma_addr_t dma_addr;
251         unsigned char *dst;
252         u64 *handle_array;
253         int shift = 0;
254         int index;
255         int i;
256
257         if (!pool->active)
258                 return;
259
260         handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
261                                       be32_to_cpu(adapter->login_rsp_buf->
262                                       off_rxadd_subcrqs));
263
264         for (i = 0; i < count; ++i) {
265                 skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
266                 if (!skb) {
267                         dev_err(dev, "Couldn't replenish rx buff\n");
268                         adapter->replenish_no_mem++;
269                         break;
270                 }
271
272                 index = pool->free_map[pool->next_free];
273
274                 if (pool->rx_buff[index].skb)
275                         dev_err(dev, "Inconsistent free_map!\n");
276
277                 /* Copy the skb to the long term mapped DMA buffer */
278                 offset = index * pool->buff_size;
279                 dst = pool->long_term_buff.buff + offset;
280                 memset(dst, 0, pool->buff_size);
281                 dma_addr = pool->long_term_buff.addr + offset;
282                 pool->rx_buff[index].data = dst;
283
284                 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
285                 pool->rx_buff[index].dma = dma_addr;
286                 pool->rx_buff[index].skb = skb;
287                 pool->rx_buff[index].pool_index = pool->index;
288                 pool->rx_buff[index].size = pool->buff_size;
289
290                 memset(&sub_crq, 0, sizeof(sub_crq));
291                 sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
292                 sub_crq.rx_add.correlator =
293                     cpu_to_be64((u64)&pool->rx_buff[index]);
294                 sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
295                 sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
296
297                 /* The length field of the sCRQ is defined to be 24 bits so the
298                  * buffer size needs to be left shifted by a byte before it is
299                  * converted to big endian to prevent the last byte from being
300                  * truncated.
301                  */
302 #ifdef __LITTLE_ENDIAN__
303                 shift = 8;
304 #endif
305                 sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
306
307                 lpar_rc = send_subcrq(adapter, handle_array[pool->index],
308                                       &sub_crq);
309                 if (lpar_rc != H_SUCCESS)
310                         goto failure;
311
312                 buffers_added++;
313                 adapter->replenish_add_buff_success++;
314                 pool->next_free = (pool->next_free + 1) % pool->size;
315         }
316         atomic_add(buffers_added, &pool->available);
317         return;
318
319 failure:
320         dev_info(dev, "replenish pools failure\n");
321         pool->free_map[pool->next_free] = index;
322         pool->rx_buff[index].skb = NULL;
323         if (!dma_mapping_error(dev, dma_addr))
324                 dma_unmap_single(dev, dma_addr, pool->buff_size,
325                                  DMA_FROM_DEVICE);
326
327         dev_kfree_skb_any(skb);
328         adapter->replenish_add_buff_failure++;
329         atomic_add(buffers_added, &pool->available);
330
331         if (lpar_rc == H_CLOSED) {
332                 /* Disable buffer pool replenishment and report carrier off if
333                  * queue is closed. Firmware guarantees that a signal will
334                  * be sent to the driver, triggering a reset.
335                  */
336                 deactivate_rx_pools(adapter);
337                 netif_carrier_off(adapter->netdev);
338         }
339 }
340
341 static void replenish_pools(struct ibmvnic_adapter *adapter)
342 {
343         int i;
344
345         adapter->replenish_task_cycles++;
346         for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
347              i++) {
348                 if (adapter->rx_pool[i].active)
349                         replenish_rx_pool(adapter, &adapter->rx_pool[i]);
350         }
351 }
352
353 static void release_stats_buffers(struct ibmvnic_adapter *adapter)
354 {
355         kfree(adapter->tx_stats_buffers);
356         kfree(adapter->rx_stats_buffers);
357         adapter->tx_stats_buffers = NULL;
358         adapter->rx_stats_buffers = NULL;
359 }
360
361 static int init_stats_buffers(struct ibmvnic_adapter *adapter)
362 {
363         adapter->tx_stats_buffers =
364                                 kcalloc(IBMVNIC_MAX_QUEUES,
365                                         sizeof(struct ibmvnic_tx_queue_stats),
366                                         GFP_KERNEL);
367         if (!adapter->tx_stats_buffers)
368                 return -ENOMEM;
369
370         adapter->rx_stats_buffers =
371                                 kcalloc(IBMVNIC_MAX_QUEUES,
372                                         sizeof(struct ibmvnic_rx_queue_stats),
373                                         GFP_KERNEL);
374         if (!adapter->rx_stats_buffers)
375                 return -ENOMEM;
376
377         return 0;
378 }
379
380 static void release_stats_token(struct ibmvnic_adapter *adapter)
381 {
382         struct device *dev = &adapter->vdev->dev;
383
384         if (!adapter->stats_token)
385                 return;
386
387         dma_unmap_single(dev, adapter->stats_token,
388                          sizeof(struct ibmvnic_statistics),
389                          DMA_FROM_DEVICE);
390         adapter->stats_token = 0;
391 }
392
393 static int init_stats_token(struct ibmvnic_adapter *adapter)
394 {
395         struct device *dev = &adapter->vdev->dev;
396         dma_addr_t stok;
397
398         stok = dma_map_single(dev, &adapter->stats,
399                               sizeof(struct ibmvnic_statistics),
400                               DMA_FROM_DEVICE);
401         if (dma_mapping_error(dev, stok)) {
402                 dev_err(dev, "Couldn't map stats buffer\n");
403                 return -1;
404         }
405
406         adapter->stats_token = stok;
407         netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
408         return 0;
409 }
410
411 static int reset_rx_pools(struct ibmvnic_adapter *adapter)
412 {
413         struct ibmvnic_rx_pool *rx_pool;
414         int rx_scrqs;
415         int i, j, rc;
416         u64 *size_array;
417
418         size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
419                 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
420
421         rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
422         for (i = 0; i < rx_scrqs; i++) {
423                 rx_pool = &adapter->rx_pool[i];
424
425                 netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
426
427                 if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
428                         free_long_term_buff(adapter, &rx_pool->long_term_buff);
429                         rx_pool->buff_size = be64_to_cpu(size_array[i]);
430                         rc = alloc_long_term_buff(adapter,
431                                                   &rx_pool->long_term_buff,
432                                                   rx_pool->size * rx_pool->buff_size);
433                 } else {
434                         rc = reset_long_term_buff(adapter,
435                                                   &rx_pool->long_term_buff);
436                 }
437
438                 if (rc)
439                         return rc;
440
441                 for (j = 0; j < rx_pool->size; j++)
442                         rx_pool->free_map[j] = j;
443
444                 memset(rx_pool->rx_buff, 0,
445                        rx_pool->size * sizeof(struct ibmvnic_rx_buff));
446
447                 atomic_set(&rx_pool->available, 0);
448                 rx_pool->next_alloc = 0;
449                 rx_pool->next_free = 0;
450                 rx_pool->active = 1;
451         }
452
453         return 0;
454 }
455
456 static void release_rx_pools(struct ibmvnic_adapter *adapter)
457 {
458         struct ibmvnic_rx_pool *rx_pool;
459         int i, j;
460
461         if (!adapter->rx_pool)
462                 return;
463
464         for (i = 0; i < adapter->num_active_rx_pools; i++) {
465                 rx_pool = &adapter->rx_pool[i];
466
467                 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
468
469                 kfree(rx_pool->free_map);
470                 free_long_term_buff(adapter, &rx_pool->long_term_buff);
471
472                 if (!rx_pool->rx_buff)
473                         continue;
474
475                 for (j = 0; j < rx_pool->size; j++) {
476                         if (rx_pool->rx_buff[j].skb) {
477                                 dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
478                                 rx_pool->rx_buff[j].skb = NULL;
479                         }
480                 }
481
482                 kfree(rx_pool->rx_buff);
483         }
484
485         kfree(adapter->rx_pool);
486         adapter->rx_pool = NULL;
487         adapter->num_active_rx_pools = 0;
488 }
489
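/* Allocate one rx pool per rx-add sub-CRQ reported in the login response.
 * Each pool holds req_rx_add_entries_per_subcrq buffers of the size the
 * server advertised and is backed by a single long term buffer.
 */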
490 static int init_rx_pools(struct net_device *netdev)
491 {
492         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
493         struct device *dev = &adapter->vdev->dev;
494         struct ibmvnic_rx_pool *rx_pool;
495         int rxadd_subcrqs;
496         u64 *size_array;
497         int i, j;
498
499         rxadd_subcrqs =
500                 be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
501         size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
502                 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
503
504         adapter->rx_pool = kcalloc(rxadd_subcrqs,
505                                    sizeof(struct ibmvnic_rx_pool),
506                                    GFP_KERNEL);
507         if (!adapter->rx_pool) {
508                 dev_err(dev, "Failed to allocate rx pools\n");
509                 return -1;
510         }
511
512         adapter->num_active_rx_pools = rxadd_subcrqs;
513
514         for (i = 0; i < rxadd_subcrqs; i++) {
515                 rx_pool = &adapter->rx_pool[i];
516
517                 netdev_dbg(adapter->netdev,
518                            "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
519                            i, adapter->req_rx_add_entries_per_subcrq,
520                            be64_to_cpu(size_array[i]));
521
522                 rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
523                 rx_pool->index = i;
524                 rx_pool->buff_size = be64_to_cpu(size_array[i]);
525                 rx_pool->active = 1;
526
527                 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
528                                             GFP_KERNEL);
529                 if (!rx_pool->free_map) {
530                         release_rx_pools(adapter);
531                         return -1;
532                 }
533
534                 rx_pool->rx_buff = kcalloc(rx_pool->size,
535                                            sizeof(struct ibmvnic_rx_buff),
536                                            GFP_KERNEL);
537                 if (!rx_pool->rx_buff) {
538                         dev_err(dev, "Couldn't alloc rx buffers\n");
539                         release_rx_pools(adapter);
540                         return -1;
541                 }
542
543                 if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
544                                          rx_pool->size * rx_pool->buff_size)) {
545                         release_rx_pools(adapter);
546                         return -1;
547                 }
548
549                 for (j = 0; j < rx_pool->size; ++j)
550                         rx_pool->free_map[j] = j;
551
552                 atomic_set(&rx_pool->available, 0);
553                 rx_pool->next_alloc = 0;
554                 rx_pool->next_free = 0;
555         }
556
557         return 0;
558 }
559
560 static int reset_tx_pools(struct ibmvnic_adapter *adapter)
561 {
562         struct ibmvnic_tx_pool *tx_pool;
563         int tx_scrqs;
564         int i, j, rc;
565
566         tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
567         for (i = 0; i < tx_scrqs; i++) {
568                 netdev_dbg(adapter->netdev, "Re-setting tx_pool[%d]\n", i);
569
570                 tx_pool = &adapter->tx_pool[i];
571
572                 rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
573                 if (rc)
574                         return rc;
575
576                 rc = reset_long_term_buff(adapter, &tx_pool->tso_ltb);
577                 if (rc)
578                         return rc;
579
580                 memset(tx_pool->tx_buff, 0,
581                        adapter->req_tx_entries_per_subcrq *
582                        sizeof(struct ibmvnic_tx_buff));
583
584                 for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
585                         tx_pool->free_map[j] = j;
586
587                 tx_pool->consumer_index = 0;
588                 tx_pool->producer_index = 0;
589                 tx_pool->tso_index = 0;
590         }
591
592         return 0;
593 }
594
595 static void release_vpd_data(struct ibmvnic_adapter *adapter)
596 {
597         if (!adapter->vpd)
598                 return;
599
600         kfree(adapter->vpd->buff);
601         kfree(adapter->vpd);
602
603         adapter->vpd = NULL;
604 }
605
606 static void release_tx_pools(struct ibmvnic_adapter *adapter)
607 {
608         struct ibmvnic_tx_pool *tx_pool;
609         int i;
610
611         if (!adapter->tx_pool)
612                 return;
613
614         for (i = 0; i < adapter->num_active_tx_pools; i++) {
615                 netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
616                 tx_pool = &adapter->tx_pool[i];
617                 kfree(tx_pool->tx_buff);
618                 free_long_term_buff(adapter, &tx_pool->long_term_buff);
619                 free_long_term_buff(adapter, &tx_pool->tso_ltb);
620                 kfree(tx_pool->free_map);
621         }
622
623         kfree(adapter->tx_pool);
624         adapter->tx_pool = NULL;
625         adapter->num_active_tx_pools = 0;
626 }
627
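/* Allocate one tx pool per tx sub-CRQ.  Each pool gets a long term buffer
 * large enough for req_tx_entries_per_subcrq MTU-sized frames plus a
 * separate long term buffer reserved for TSO frames.
 */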
628 static int init_tx_pools(struct net_device *netdev)
629 {
630         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
631         struct device *dev = &adapter->vdev->dev;
632         struct ibmvnic_tx_pool *tx_pool;
633         int tx_subcrqs;
634         int i, j;
635
636         tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
637         adapter->tx_pool = kcalloc(tx_subcrqs,
638                                    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
639         if (!adapter->tx_pool)
640                 return -1;
641
642         adapter->num_active_tx_pools = tx_subcrqs;
643
644         for (i = 0; i < tx_subcrqs; i++) {
645                 tx_pool = &adapter->tx_pool[i];
646
647                 netdev_dbg(adapter->netdev,
648                            "Initializing tx_pool[%d], %lld buffs\n",
649                            i, adapter->req_tx_entries_per_subcrq);
650
651                 tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
652                                            sizeof(struct ibmvnic_tx_buff),
653                                            GFP_KERNEL);
654                 if (!tx_pool->tx_buff) {
655                         dev_err(dev, "tx pool buffer allocation failed\n");
656                         release_tx_pools(adapter);
657                         return -1;
658                 }
659
660                 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
661                                          adapter->req_tx_entries_per_subcrq *
662                                          adapter->req_mtu)) {
663                         release_tx_pools(adapter);
664                         return -1;
665                 }
666
667                 /* alloc TSO ltb */
668                 if (alloc_long_term_buff(adapter, &tx_pool->tso_ltb,
669                                          IBMVNIC_TSO_BUFS *
670                                          IBMVNIC_TSO_BUF_SZ)) {
671                         release_tx_pools(adapter);
672                         return -1;
673                 }
674
675                 tx_pool->tso_index = 0;
676
677                 tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
678                                             sizeof(int), GFP_KERNEL);
679                 if (!tx_pool->free_map) {
680                         release_tx_pools(adapter);
681                         return -1;
682                 }
683
684                 for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
685                         tx_pool->free_map[j] = j;
686
687                 tx_pool->consumer_index = 0;
688                 tx_pool->producer_index = 0;
689         }
690
691         return 0;
692 }
693
694 static void release_error_buffers(struct ibmvnic_adapter *adapter)
695 {
696         struct device *dev = &adapter->vdev->dev;
697         struct ibmvnic_error_buff *error_buff, *tmp;
698         unsigned long flags;
699
700         spin_lock_irqsave(&adapter->error_list_lock, flags);
701         list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
702                 list_del(&error_buff->list);
703                 dma_unmap_single(dev, error_buff->dma, error_buff->len,
704                                  DMA_FROM_DEVICE);
705                 kfree(error_buff->buff);
706                 kfree(error_buff);
707         }
708         spin_unlock_irqrestore(&adapter->error_list_lock, flags);
709 }
710
711 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
712 {
713         int i;
714
715         if (adapter->napi_enabled)
716                 return;
717
718         for (i = 0; i < adapter->req_rx_queues; i++)
719                 napi_enable(&adapter->napi[i]);
720
721         adapter->napi_enabled = true;
722 }
723
724 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
725 {
726         int i;
727
728         if (!adapter->napi_enabled)
729                 return;
730
731         for (i = 0; i < adapter->req_rx_queues; i++) {
732                 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
733                 napi_disable(&adapter->napi[i]);
734         }
735
736         adapter->napi_enabled = false;
737 }
738
739 static int init_napi(struct ibmvnic_adapter *adapter)
740 {
741         int i;
742
743         adapter->napi = kcalloc(adapter->req_rx_queues,
744                                 sizeof(struct napi_struct), GFP_KERNEL);
745         if (!adapter->napi)
746                 return -ENOMEM;
747
748         for (i = 0; i < adapter->req_rx_queues; i++) {
749                 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
750                 netif_napi_add(adapter->netdev, &adapter->napi[i],
751                                ibmvnic_poll, NAPI_POLL_WEIGHT);
752         }
753
754         adapter->num_active_rx_napi = adapter->req_rx_queues;
755         return 0;
756 }
757
758 static void release_napi(struct ibmvnic_adapter *adapter)
759 {
760         int i;
761
762         if (!adapter->napi)
763                 return;
764
765         for (i = 0; i < adapter->num_active_rx_napi; i++) {
766                 if (&adapter->napi[i]) {
767                         netdev_dbg(adapter->netdev,
768                                    "Releasing napi[%d]\n", i);
769                         netif_napi_del(&adapter->napi[i]);
770                 }
771         }
772
773         kfree(adapter->napi);
774         adapter->napi = NULL;
775         adapter->num_active_rx_napi = 0;
776 }
777
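/* Log in to the VNIC server.  If the server requests renegotiation, the
 * sub-CRQs are released, capabilities are re-queried and the sub-CRQs are
 * re-initialized before the login is retried.
 */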
778 static int ibmvnic_login(struct net_device *netdev)
779 {
780         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
781         unsigned long timeout = msecs_to_jiffies(30000);
782         struct device *dev = &adapter->vdev->dev;
783         int rc;
784
785         do {
786                 if (adapter->renegotiate) {
787                         adapter->renegotiate = false;
788                         release_sub_crqs(adapter, 1);
789
790                         reinit_completion(&adapter->init_done);
791                         send_cap_queries(adapter);
792                         if (!wait_for_completion_timeout(&adapter->init_done,
793                                                          timeout)) {
794                                 dev_err(dev, "Capabilities query timeout\n");
795                                 return -1;
796                         }
797                         rc = init_sub_crqs(adapter);
798                         if (rc) {
799                                 dev_err(dev,
800                                         "Initialization of SCRQ's failed\n");
801                                 return -1;
802                         }
803                         rc = init_sub_crq_irqs(adapter);
804                         if (rc) {
805                                 dev_err(dev,
806                                         "Initialization of SCRQ's irqs failed\n");
807                                 return -1;
808                         }
809                 }
810
811                 reinit_completion(&adapter->init_done);
812                 rc = send_login(adapter);
813                 if (rc) {
814                         dev_err(dev, "Unable to attempt device login\n");
815                         return rc;
816                 } else if (!wait_for_completion_timeout(&adapter->init_done,
817                                                  timeout)) {
818                         dev_err(dev, "Login timeout\n");
819                         return -1;
820                 }
821         } while (adapter->renegotiate);
822
823         /* handle pending MAC address changes after successful login */
824         if (adapter->mac_change_pending) {
825                 __ibmvnic_set_mac(netdev, &adapter->desired.mac);
826                 adapter->mac_change_pending = false;
827         }
828
829         return 0;
830 }
831
832 static void release_login_buffer(struct ibmvnic_adapter *adapter)
833 {
834         kfree(adapter->login_buf);
835         adapter->login_buf = NULL;
836 }
837
838 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
839 {
840         kfree(adapter->login_rsp_buf);
841         adapter->login_rsp_buf = NULL;
842 }
843
844 static void release_resources(struct ibmvnic_adapter *adapter)
845 {
846         release_vpd_data(adapter);
847
848         release_tx_pools(adapter);
849         release_rx_pools(adapter);
850
851         release_error_buffers(adapter);
852         release_napi(adapter);
853         release_login_rsp_buffer(adapter);
854 }
855
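/* Send a LOGICAL_LINK_STATE command and wait for the response.  A partial
 * success (init_done_rc == 1) is retried after a one second delay.
 */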
856 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
857 {
858         struct net_device *netdev = adapter->netdev;
859         unsigned long timeout = msecs_to_jiffies(30000);
860         union ibmvnic_crq crq;
861         bool resend;
862         int rc;
863
864         netdev_dbg(netdev, "setting link state %d\n", link_state);
865
866         memset(&crq, 0, sizeof(crq));
867         crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
868         crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
869         crq.logical_link_state.link_state = link_state;
870
871         do {
872                 resend = false;
873
874                 reinit_completion(&adapter->init_done);
875                 rc = ibmvnic_send_crq(adapter, &crq);
876                 if (rc) {
877                         netdev_err(netdev, "Failed to set link state\n");
878                         return rc;
879                 }
880
881                 if (!wait_for_completion_timeout(&adapter->init_done,
882                                                  timeout)) {
883                         netdev_err(netdev, "timeout setting link state\n");
884                         return -1;
885                 }
886
887                 if (adapter->init_done_rc == 1) {
888                         /* Partial success, delay and resend */
889                         mdelay(1000);
890                         resend = true;
891                 }
892         } while (resend);
893
894         return 0;
895 }
896
897 static int set_real_num_queues(struct net_device *netdev)
898 {
899         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
900         int rc;
901
902         netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
903                    adapter->req_tx_queues, adapter->req_rx_queues);
904
905         rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
906         if (rc) {
907                 netdev_err(netdev, "failed to set the number of tx queues\n");
908                 return rc;
909         }
910
911         rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
912         if (rc)
913                 netdev_err(netdev, "failed to set the number of rx queues\n");
914
915         return rc;
916 }
917
918 static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
919 {
920         struct device *dev = &adapter->vdev->dev;
921         union ibmvnic_crq crq;
922         int len = 0;
923
924         if (adapter->vpd->buff)
925                 len = adapter->vpd->len;
926
927         init_completion(&adapter->fw_done);
928         crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
929         crq.get_vpd_size.cmd = GET_VPD_SIZE;
930         ibmvnic_send_crq(adapter, &crq);
931         wait_for_completion(&adapter->fw_done);
932
933         if (!adapter->vpd->len)
934                 return -ENODATA;
935
936         if (!adapter->vpd->buff)
937                 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
938         else if (adapter->vpd->len != len)
939                 adapter->vpd->buff =
940                         krealloc(adapter->vpd->buff,
941                                  adapter->vpd->len, GFP_KERNEL);
942
943         if (!adapter->vpd->buff) {
944                 dev_err(dev, "Could not allocate VPD buffer\n");
945                 return -ENOMEM;
946         }
947
948         adapter->vpd->dma_addr =
949                 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
950                                DMA_FROM_DEVICE);
951         if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
952                 dev_err(dev, "Could not map VPD buffer\n");
953                 kfree(adapter->vpd->buff);
954                 adapter->vpd->buff = NULL;
955                 return -ENOMEM;
956         }
957
958         reinit_completion(&adapter->fw_done);
959         crq.get_vpd.first = IBMVNIC_CRQ_CMD;
960         crq.get_vpd.cmd = GET_VPD;
961         crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
962         crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
963         ibmvnic_send_crq(adapter, &crq);
964         wait_for_completion(&adapter->fw_done);
965
966         return 0;
967 }
968
969 static int init_resources(struct ibmvnic_adapter *adapter)
970 {
971         struct net_device *netdev = adapter->netdev;
972         int rc;
973
974         rc = set_real_num_queues(netdev);
975         if (rc)
976                 return rc;
977
978         adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
979         if (!adapter->vpd)
980                 return -ENOMEM;
981
982         /* Vital Product Data (VPD) */
983         rc = ibmvnic_get_vpd(adapter);
984         if (rc) {
985                 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
986                 return rc;
987         }
988
989         adapter->map_id = 1;
990
991         rc = init_napi(adapter);
992         if (rc)
993                 return rc;
994
995         send_map_query(adapter);
996
997         rc = init_rx_pools(netdev);
998         if (rc)
999                 return rc;
1000
1001         rc = init_tx_pools(netdev);
1002         return rc;
1003 }
1004
1005 static int __ibmvnic_open(struct net_device *netdev)
1006 {
1007         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1008         enum vnic_state prev_state = adapter->state;
1009         int i, rc;
1010
1011         adapter->state = VNIC_OPENING;
1012         replenish_pools(adapter);
1013         ibmvnic_napi_enable(adapter);
1014
1015         /* We're ready to receive frames, enable the sub-crq interrupts and
1016          * set the logical link state to up
1017          */
1018         for (i = 0; i < adapter->req_rx_queues; i++) {
1019                 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1020                 if (prev_state == VNIC_CLOSED)
1021                         enable_irq(adapter->rx_scrq[i]->irq);
1022                 else
1023                         enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1024         }
1025
1026         for (i = 0; i < adapter->req_tx_queues; i++) {
1027                 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1028                 if (prev_state == VNIC_CLOSED)
1029                         enable_irq(adapter->tx_scrq[i]->irq);
1030                 else
1031                         enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1032         }
1033
1034         rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1035         if (rc) {
1036                 for (i = 0; i < adapter->req_rx_queues; i++)
1037                         napi_disable(&adapter->napi[i]);
1038                 release_resources(adapter);
1039                 return rc;
1040         }
1041
1042         netif_tx_start_all_queues(netdev);
1043
1044         if (prev_state == VNIC_CLOSED) {
1045                 for (i = 0; i < adapter->req_rx_queues; i++)
1046                         napi_schedule(&adapter->napi[i]);
1047         }
1048
1049         adapter->state = VNIC_OPEN;
1050         return rc;
1051 }
1052
1053 static int ibmvnic_open(struct net_device *netdev)
1054 {
1055         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1056         int rc;
1057
1058         mutex_lock(&adapter->reset_lock);
1059
1060         if (adapter->state != VNIC_CLOSED) {
1061                 rc = ibmvnic_login(netdev);
1062                 if (rc) {
1063                         mutex_unlock(&adapter->reset_lock);
1064                         return rc;
1065                 }
1066
1067                 rc = init_resources(adapter);
1068                 if (rc) {
1069                         netdev_err(netdev, "failed to initialize resources\n");
1070                         release_resources(adapter);
1071                         mutex_unlock(&adapter->reset_lock);
1072                         return rc;
1073                 }
1074         }
1075
1076         rc = __ibmvnic_open(netdev);
1077         netif_carrier_on(netdev);
1078
1079         mutex_unlock(&adapter->reset_lock);
1080
1081         return rc;
1082 }
1083
1084 static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1085 {
1086         struct ibmvnic_rx_pool *rx_pool;
1087         struct ibmvnic_rx_buff *rx_buff;
1088         u64 rx_entries;
1089         int rx_scrqs;
1090         int i, j;
1091
1092         if (!adapter->rx_pool)
1093                 return;
1094
1095         rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
1096         rx_entries = adapter->req_rx_add_entries_per_subcrq;
1097
1098         /* Free any remaining skbs in the rx buffer pools */
1099         for (i = 0; i < rx_scrqs; i++) {
1100                 rx_pool = &adapter->rx_pool[i];
1101                 if (!rx_pool || !rx_pool->rx_buff)
1102                         continue;
1103
1104                 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1105                 for (j = 0; j < rx_entries; j++) {
1106                         rx_buff = &rx_pool->rx_buff[j];
1107                         if (rx_buff && rx_buff->skb) {
1108                                 dev_kfree_skb_any(rx_buff->skb);
1109                                 rx_buff->skb = NULL;
1110                         }
1111                 }
1112         }
1113 }
1114
1115 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1116 {
1117         struct ibmvnic_tx_pool *tx_pool;
1118         struct ibmvnic_tx_buff *tx_buff;
1119         u64 tx_entries;
1120         int tx_scrqs;
1121         int i, j;
1122
1123         if (!adapter->tx_pool)
1124                 return;
1125
1126         tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
1127         tx_entries = adapter->req_tx_entries_per_subcrq;
1128
1129         /* Free any remaining skbs in the tx buffer pools */
1130         for (i = 0; i < tx_scrqs; i++) {
1131                 tx_pool = &adapter->tx_pool[i];
1132                 if (!tx_pool || !tx_pool->tx_buff)
1133                         continue;
1134
1135                 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
1136                 for (j = 0; j < tx_entries; j++) {
1137                         tx_buff = &tx_pool->tx_buff[j];
1138                         if (tx_buff && tx_buff->skb) {
1139                                 dev_kfree_skb_any(tx_buff->skb);
1140                                 tx_buff->skb = NULL;
1141                         }
1142                 }
1143         }
1144 }
1145
1146 static void ibmvnic_cleanup(struct net_device *netdev)
1147 {
1148         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1149         int i;
1150
1151         /* ensure that transmissions are stopped if called by do_reset */
1152         if (adapter->resetting)
1153                 netif_tx_disable(netdev);
1154         else
1155                 netif_tx_stop_all_queues(netdev);
1156
1157         ibmvnic_napi_disable(adapter);
1158
1159         if (adapter->tx_scrq) {
1160                 for (i = 0; i < adapter->req_tx_queues; i++)
1161                         if (adapter->tx_scrq[i]->irq) {
1162                                 netdev_dbg(netdev,
1163                                            "Disabling tx_scrq[%d] irq\n", i);
1164                                 disable_irq(adapter->tx_scrq[i]->irq);
1165                         }
1166         }
1167
1168         if (adapter->rx_scrq) {
1169                 for (i = 0; i < adapter->req_rx_queues; i++) {
1170                         if (adapter->rx_scrq[i]->irq) {
1171                                 netdev_dbg(netdev,
1172                                            "Disabling rx_scrq[%d] irq\n", i);
1173                                 disable_irq(adapter->rx_scrq[i]->irq);
1174                         }
1175                 }
1176         }
1177         clean_rx_pools(adapter);
1178         clean_tx_pools(adapter);
1179 }
1180
1181 static int __ibmvnic_close(struct net_device *netdev)
1182 {
1183         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1184         int rc = 0;
1185
1186         adapter->state = VNIC_CLOSING;
1187         rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1188         if (rc)
1189                 return rc;
1190         ibmvnic_cleanup(netdev);
1191         adapter->state = VNIC_CLOSED;
1192         return 0;
1193 }
1194
1195 static int ibmvnic_close(struct net_device *netdev)
1196 {
1197         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1198         int rc;
1199
1200         mutex_lock(&adapter->reset_lock);
1201         rc = __ibmvnic_close(netdev);
1202         mutex_unlock(&adapter->reset_lock);
1203
1204         return rc;
1205 }
1206
1207 /**
1208  * build_hdr_data - creates L2/L3/L4 header data buffer
1209  * @hdr_field - bitfield determining needed headers
1210  * @skb - socket buffer
1211  * @hdr_len - array of header lengths
1212  * @hdr_data - buffer to hold the built header data
1213  *
1214  * Reads hdr_field to determine which headers are needed by firmware.
1215  * Builds a buffer containing these headers.  Saves individual header
1216  * lengths and returns the total buffer length, both used to build descriptors.
1217  */
1218 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1219                           int *hdr_len, u8 *hdr_data)
1220 {
1221         int len = 0;
1222         u8 *hdr;
1223
1224         hdr_len[0] = sizeof(struct ethhdr);
1225
1226         if (skb->protocol == htons(ETH_P_IP)) {
1227                 hdr_len[1] = ip_hdr(skb)->ihl * 4;
1228                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1229                         hdr_len[2] = tcp_hdrlen(skb);
1230                 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1231                         hdr_len[2] = sizeof(struct udphdr);
1232         } else if (skb->protocol == htons(ETH_P_IPV6)) {
1233                 hdr_len[1] = sizeof(struct ipv6hdr);
1234                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1235                         hdr_len[2] = tcp_hdrlen(skb);
1236                 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1237                         hdr_len[2] = sizeof(struct udphdr);
1238         } else if (skb->protocol == htons(ETH_P_ARP)) {
1239                 hdr_len[1] = arp_hdr_len(skb->dev);
1240                 hdr_len[2] = 0;
1241         }
1242
1243         memset(hdr_data, 0, 120);
1244         if ((hdr_field >> 6) & 1) {
1245                 hdr = skb_mac_header(skb);
1246                 memcpy(hdr_data, hdr, hdr_len[0]);
1247                 len += hdr_len[0];
1248         }
1249
1250         if ((hdr_field >> 5) & 1) {
1251                 hdr = skb_network_header(skb);
1252                 memcpy(hdr_data + len, hdr, hdr_len[1]);
1253                 len += hdr_len[1];
1254         }
1255
1256         if ((hdr_field >> 4) & 1) {
1257                 hdr = skb_transport_header(skb);
1258                 memcpy(hdr_data + len, hdr, hdr_len[2]);
1259                 len += hdr_len[2];
1260         }
1261         return len;
1262 }
1263
1264 /**
1265  * create_hdr_descs - create header and header extension descriptors
1266  * @hdr_field - bitfield determining needed headers
1267  * @data - buffer containing header data
1268  * @len - length of data buffer
1269  * @hdr_len - array of individual header lengths
1270  * @scrq_arr - descriptor array
1271  *
1272  * Creates header and, if needed, header extension descriptors and
1273  * places them in a descriptor array, scrq_arr
1274  */
1275
1276 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1277                             union sub_crq *scrq_arr)
1278 {
1279         union sub_crq hdr_desc;
1280         int tmp_len = len;
1281         int num_descs = 0;
1282         u8 *data, *cur;
1283         int tmp;
1284
1285         while (tmp_len > 0) {
1286                 cur = hdr_data + len - tmp_len;
1287
1288                 memset(&hdr_desc, 0, sizeof(hdr_desc));
1289                 if (cur != hdr_data) {
1290                         data = hdr_desc.hdr_ext.data;
1291                         tmp = tmp_len > 29 ? 29 : tmp_len;
1292                         hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1293                         hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1294                         hdr_desc.hdr_ext.len = tmp;
1295                 } else {
1296                         data = hdr_desc.hdr.data;
1297                         tmp = tmp_len > 24 ? 24 : tmp_len;
1298                         hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1299                         hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1300                         hdr_desc.hdr.len = tmp;
1301                         hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1302                         hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1303                         hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1304                         hdr_desc.hdr.flag = hdr_field << 1;
1305                 }
1306                 memcpy(data, cur, tmp);
1307                 tmp_len -= tmp;
1308                 *scrq_arr = hdr_desc;
1309                 scrq_arr++;
1310                 num_descs++;
1311         }
1312
1313         return num_descs;
1314 }
1315
1316 /**
1317  * build_hdr_descs_arr - build a header descriptor array
1318  * @txbuff - tx buffer whose indirect descriptor array will be filled
1319  * @num_entries - pointer to the descriptor count, incremented by the
1320  *                number of header descriptors created
1321  * @hdr_field - bit field determining which headers will be sent
1322  *
1323  * This function will build a TX descriptor array with applicable
1324  * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1325  */
1326
1327 static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1328                                 int *num_entries, u8 hdr_field)
1329 {
1330         int hdr_len[3] = {0, 0, 0};
1331         int tot_len;
1332         u8 *hdr_data = txbuff->hdr_data;
1333
1334         tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1335                                  txbuff->hdr_data);
1336         *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1337                          txbuff->indir_arr + 1);
1338 }
1339
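/* Transmit path: copy the skb (head and any frags) into the queue's long
 * term buffer (or the TSO buffer for GSO skbs) and build a TX sub-CRQ
 * descriptor, with any requested checksum/LSO flags and optional header
 * descriptors, for submission to the server.
 */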
1340 static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1341 {
1342         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1343         int queue_num = skb_get_queue_mapping(skb);
1344         u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
1345         struct device *dev = &adapter->vdev->dev;
1346         struct ibmvnic_tx_buff *tx_buff = NULL;
1347         struct ibmvnic_sub_crq_queue *tx_scrq;
1348         struct ibmvnic_tx_pool *tx_pool;
1349         unsigned int tx_send_failed = 0;
1350         unsigned int tx_map_failed = 0;
1351         unsigned int tx_dropped = 0;
1352         unsigned int tx_packets = 0;
1353         unsigned int tx_bytes = 0;
1354         dma_addr_t data_dma_addr;
1355         struct netdev_queue *txq;
1356         unsigned long lpar_rc;
1357         union sub_crq tx_crq;
1358         unsigned int offset;
1359         int num_entries = 1;
1360         unsigned char *dst;
1361         u64 *handle_array;
1362         int index = 0;
1363         u8 proto = 0;
1364         int ret = 0;
1365
1366         if (adapter->resetting) {
1367                 if (!netif_subqueue_stopped(netdev, skb))
1368                         netif_stop_subqueue(netdev, queue_num);
1369                 dev_kfree_skb_any(skb);
1370
1371                 tx_send_failed++;
1372                 tx_dropped++;
1373                 ret = NETDEV_TX_OK;
1374                 goto out;
1375         }
1376
1377         tx_pool = &adapter->tx_pool[queue_num];
1378         tx_scrq = adapter->tx_scrq[queue_num];
1379         txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
1380         handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
1381                 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
1382
1383         index = tx_pool->free_map[tx_pool->consumer_index];
1384
1385         if (skb_is_gso(skb)) {
1386                 offset = tx_pool->tso_index * IBMVNIC_TSO_BUF_SZ;
1387                 dst = tx_pool->tso_ltb.buff + offset;
1388                 memset(dst, 0, IBMVNIC_TSO_BUF_SZ);
1389                 data_dma_addr = tx_pool->tso_ltb.addr + offset;
1390                 tx_pool->tso_index++;
1391                 if (tx_pool->tso_index == IBMVNIC_TSO_BUFS)
1392                         tx_pool->tso_index = 0;
1393         } else {
1394                 offset = index * adapter->req_mtu;
1395                 dst = tx_pool->long_term_buff.buff + offset;
1396                 memset(dst, 0, adapter->req_mtu);
1397                 data_dma_addr = tx_pool->long_term_buff.addr + offset;
1398         }
1399
1400         if (skb_shinfo(skb)->nr_frags) {
1401                 int cur, i;
1402
1403                 /* Copy the head */
1404                 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1405                 cur = skb_headlen(skb);
1406
1407                 /* Copy the frags */
1408                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1409                         const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1410
1411                         memcpy(dst + cur,
1412                                page_address(skb_frag_page(frag)) +
1413                                frag->page_offset, skb_frag_size(frag));
1414                         cur += skb_frag_size(frag);
1415                 }
1416         } else {
1417                 skb_copy_from_linear_data(skb, dst, skb->len);
1418         }
1419
1420         tx_pool->consumer_index =
1421             (tx_pool->consumer_index + 1) %
1422                 adapter->req_tx_entries_per_subcrq;
1423
1424         tx_buff = &tx_pool->tx_buff[index];
1425         tx_buff->skb = skb;
1426         tx_buff->data_dma[0] = data_dma_addr;
1427         tx_buff->data_len[0] = skb->len;
1428         tx_buff->index = index;
1429         tx_buff->pool_index = queue_num;
1430         tx_buff->last_frag = true;
1431
1432         memset(&tx_crq, 0, sizeof(tx_crq));
1433         tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1434         tx_crq.v1.type = IBMVNIC_TX_DESC;
1435         tx_crq.v1.n_crq_elem = 1;
1436         tx_crq.v1.n_sge = 1;
1437         tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
1438         tx_crq.v1.correlator = cpu_to_be32(index);
1439         if (skb_is_gso(skb))
1440                 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->tso_ltb.map_id);
1441         else
1442                 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
1443         tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1444         tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1445
1446         if (adapter->vlan_header_insertion) {
1447                 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1448                 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1449         }
1450
1451         if (skb->protocol == htons(ETH_P_IP)) {
1452                 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1453                 proto = ip_hdr(skb)->protocol;
1454         } else if (skb->protocol == htons(ETH_P_IPV6)) {
1455                 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1456                 proto = ipv6_hdr(skb)->nexthdr;
1457         }
1458
1459         if (proto == IPPROTO_TCP)
1460                 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1461         else if (proto == IPPROTO_UDP)
1462                 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1463
1464         if (skb->ip_summed == CHECKSUM_PARTIAL) {
1465                 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
1466                 hdrs += 2;
1467         }
1468         if (skb_is_gso(skb)) {
1469                 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1470                 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1471                 hdrs += 2;
1472         }
1473         /* determine if l2/3/4 headers are sent to firmware */
1474         if ((*hdrs >> 7) & 1) {
1475                 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
1476                 tx_crq.v1.n_crq_elem = num_entries;
1477                 tx_buff->num_entries = num_entries;
1478                 tx_buff->indir_arr[0] = tx_crq;
1479                 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
1480                                                     sizeof(tx_buff->indir_arr),
1481                                                     DMA_TO_DEVICE);
1482                 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
1483                         dev_kfree_skb_any(skb);
1484                         tx_buff->skb = NULL;
1485                         if (!firmware_has_feature(FW_FEATURE_CMO))
1486                                 dev_err(dev, "tx: unable to map descriptor array\n");
1487                         tx_map_failed++;
1488                         tx_dropped++;
1489                         ret = NETDEV_TX_OK;
1490                         goto out;
1491                 }
1492                 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
1493                                                (u64)tx_buff->indir_dma,
1494                                                (u64)num_entries);
1495         } else {
1496                 tx_buff->num_entries = num_entries;
1497                 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
1498                                       &tx_crq);
1499         }
1500         if (lpar_rc != H_SUCCESS) {
1501                 dev_err(dev, "tx failed with code %ld\n", lpar_rc);
1502
1503                 if (tx_pool->consumer_index == 0)
1504                         tx_pool->consumer_index =
1505                                 adapter->req_tx_entries_per_subcrq - 1;
1506                 else
1507                         tx_pool->consumer_index--;
1508
1509                 dev_kfree_skb_any(skb);
1510                 tx_buff->skb = NULL;
1511
1512                 if (lpar_rc == H_CLOSED) {
1513                         /* Disable TX and report carrier off if queue is closed.
1514                          * Firmware guarantees that a signal will be sent to the
1515                          * driver, triggering a reset or some other action.
1516                          */
1517                         netif_tx_stop_all_queues(netdev);
1518                         netif_carrier_off(netdev);
1519                 }
1520
1521                 tx_send_failed++;
1522                 tx_dropped++;
1523                 ret = NETDEV_TX_OK;
1524                 goto out;
1525         }
1526
1527         if (atomic_add_return(num_entries, &tx_scrq->used)
1528                                         >= adapter->req_tx_entries_per_subcrq) {
1529                 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
1530                 netif_stop_subqueue(netdev, queue_num);
1531         }
1532
1533         tx_packets++;
1534         tx_bytes += skb->len;
1535         txq->trans_start = jiffies;
1536         ret = NETDEV_TX_OK;
1537
1538 out:
1539         netdev->stats.tx_dropped += tx_dropped;
1540         netdev->stats.tx_bytes += tx_bytes;
1541         netdev->stats.tx_packets += tx_packets;
1542         adapter->tx_send_failed += tx_send_failed;
1543         adapter->tx_map_failed += tx_map_failed;
1544         adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1545         adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1546         adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
1547
1548         return ret;
1549 }
1550
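/* Update the multicast filter: accept all, reject all, or program each
 * multicast address individually via MULTICAST_CTRL CRQs.
 */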
1551 static void ibmvnic_set_multi(struct net_device *netdev)
1552 {
1553         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1554         struct netdev_hw_addr *ha;
1555         union ibmvnic_crq crq;
1556
1557         memset(&crq, 0, sizeof(crq));
1558         crq.request_capability.first = IBMVNIC_CRQ_CMD;
1559         crq.request_capability.cmd = REQUEST_CAPABILITY;
1560
1561         if (netdev->flags & IFF_PROMISC) {
1562                 if (!adapter->promisc_supported)
1563                         return;
1564         } else {
1565                 if (netdev->flags & IFF_ALLMULTI) {
1566                         /* Accept all multicast */
1567                         memset(&crq, 0, sizeof(crq));
1568                         crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1569                         crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1570                         crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1571                         ibmvnic_send_crq(adapter, &crq);
1572                 } else if (netdev_mc_empty(netdev)) {
1573                         /* Reject all multicast */
1574                         memset(&crq, 0, sizeof(crq));
1575                         crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1576                         crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1577                         crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1578                         ibmvnic_send_crq(adapter, &crq);
1579                 } else {
1580                         /* Accept one or more multicast addresses */
1581                         netdev_for_each_mc_addr(ha, netdev) {
1582                                 memset(&crq, 0, sizeof(crq));
1583                                 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1584                                 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1585                                 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1586                                 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1587                                                 ha->addr);
1588                                 ibmvnic_send_crq(adapter, &crq);
1589                         }
1590                 }
1591         }
1592 }
1593
1594 static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
1595 {
1596         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1597         struct sockaddr *addr = p;
1598         union ibmvnic_crq crq;
1599
1600         if (!is_valid_ether_addr(addr->sa_data))
1601                 return -EADDRNOTAVAIL;
1602
1603         memset(&crq, 0, sizeof(crq));
1604         crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1605         crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1606         ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
1607
1608         init_completion(&adapter->fw_done);
1609         ibmvnic_send_crq(adapter, &crq);
1610         wait_for_completion(&adapter->fw_done);
1611         /* netdev->dev_addr is updated by handle_change_mac_rsp() */
1612         return adapter->fw_done_rc ? -EIO : 0;
1613 }
1614
1615 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1616 {
1617         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1618         struct sockaddr *addr = p;
1619         int rc;
1620
1621         if (adapter->state == VNIC_PROBED) {
1622                 memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
1623                 adapter->mac_change_pending = true;
1624                 return 0;
1625         }
1626
1627         rc = __ibmvnic_set_mac(netdev, addr);
1628
1629         return rc;
1630 }
1631
1632 /*
1633  * do_reset returns zero if we are able to keep processing reset events, or
1634  * non-zero if we hit a fatal error and must halt.
1635  */
1636 static int do_reset(struct ibmvnic_adapter *adapter,
1637                     struct ibmvnic_rwi *rwi, u32 reset_state)
1638 {
1639         u64 old_num_rx_queues, old_num_tx_queues;
1640         struct net_device *netdev = adapter->netdev;
1641         int i, rc;
1642
1643         netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
1644                    rwi->reset_reason);
1645
1646         netif_carrier_off(netdev);
1647         adapter->reset_reason = rwi->reset_reason;
1648
1649         old_num_rx_queues = adapter->req_rx_queues;
1650         old_num_tx_queues = adapter->req_tx_queues;
1651
1652         if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
1653                 rc = ibmvnic_reenable_crq_queue(adapter);
1654                 if (rc)
1655                         return 0;
1656                 ibmvnic_cleanup(netdev);
1657         } else if (rwi->reset_reason == VNIC_RESET_FAILOVER) {
1658                 ibmvnic_cleanup(netdev);
1659         } else {
1660                 rc = __ibmvnic_close(netdev);
1661                 if (rc)
1662                         return rc;
1663         }
1664
1665         if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
1666             adapter->wait_for_reset) {
1667                 release_resources(adapter);
1668                 release_sub_crqs(adapter, 1);
1669                 release_crq_queue(adapter);
1670         }
1671
1672         if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
1673                 /* remove the closed state so when we call open it appears
1674                  * we are coming from the probed state.
1675                  */
1676                 adapter->state = VNIC_PROBED;
1677
1678                 rc = ibmvnic_init(adapter);
1679                 if (rc)
1680                         return IBMVNIC_INIT_FAILED;
1681
1682                 /* If the adapter was in PROBE state prior to the reset,
1683                  * exit here.
1684                  */
1685                 if (reset_state == VNIC_PROBED)
1686                         return 0;
1687
1688                 rc = ibmvnic_login(netdev);
1689                 if (rc) {
1690                         adapter->state = VNIC_PROBED;
1691                         return 0;
1692                 }
1693
1694                 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
1695                     adapter->wait_for_reset) {
1696                         rc = init_resources(adapter);
1697                         if (rc)
1698                                 return rc;
1699                 } else if (adapter->req_rx_queues != old_num_rx_queues ||
1700                            adapter->req_tx_queues != old_num_tx_queues) {
1701                         adapter->map_id = 1;
1702                         release_rx_pools(adapter);
1703                         release_tx_pools(adapter);
                        rc = init_rx_pools(netdev);
                        if (rc)
                                return rc;

                        rc = init_tx_pools(netdev);
                        if (rc)
                                return rc;
1706
1707                         release_napi(adapter);
1708                         init_napi(adapter);
1709                 } else {
1710                         rc = reset_tx_pools(adapter);
1711                         if (rc)
1712                                 return rc;
1713
1714                         rc = reset_rx_pools(adapter);
1715                         if (rc)
1716                                 return rc;
1717
1718                         if (reset_state == VNIC_CLOSED)
1719                                 return 0;
1720                 }
1721         }
1722
1723         rc = __ibmvnic_open(netdev);
1724         if (rc) {
1725                 if (list_empty(&adapter->rwi_list))
1726                         adapter->state = VNIC_CLOSED;
1727                 else
1728                         adapter->state = reset_state;
1729
1730                 return 0;
1731         }
1732
1733         /* kick napi */
1734         for (i = 0; i < adapter->req_rx_queues; i++)
1735                 napi_schedule(&adapter->napi[i]);
1736
1737         if (adapter->reset_reason != VNIC_RESET_FAILOVER)
1738                 netdev_notify_peers(netdev);
1739
1740         netif_carrier_on(netdev);
1741
1742         return 0;
1743 }
1744
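/* Dequeue the next reset work item, or NULL if none are pending.
 * The rwi list is protected by rwi_lock.
 */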
1745 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
1746 {
1747         struct ibmvnic_rwi *rwi;
1748
1749         mutex_lock(&adapter->rwi_lock);
1750
1751         if (!list_empty(&adapter->rwi_list)) {
1752                 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
1753                                        list);
1754                 list_del(&rwi->list);
1755         } else {
1756                 rwi = NULL;
1757         }
1758
1759         mutex_unlock(&adapter->rwi_lock);
1760         return rwi;
1761 }
1762
1763 static void free_all_rwi(struct ibmvnic_adapter *adapter)
1764 {
1765         struct ibmvnic_rwi *rwi;
1766
1767         rwi = get_next_rwi(adapter);
1768         while (rwi) {
1769                 kfree(rwi);
1770                 rwi = get_next_rwi(adapter);
1771         }
1772 }
1773
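/* Reset worker: process queued reset work items one at a time until the
 * list is empty or a reset fails fatally.
 */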
1774 static void __ibmvnic_reset(struct work_struct *work)
1775 {
1776         struct ibmvnic_rwi *rwi;
1777         struct ibmvnic_adapter *adapter;
1778         struct net_device *netdev;
1779         u32 reset_state;
1780         int rc = 0;
1781
1782         adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
1783         netdev = adapter->netdev;
1784
1785         mutex_lock(&adapter->reset_lock);
1786         adapter->resetting = true;
1787         reset_state = adapter->state;
1788
1789         rwi = get_next_rwi(adapter);
1790         while (rwi) {
1791                 rc = do_reset(adapter, rwi, reset_state);
1792                 kfree(rwi);
1793                 if (rc && rc != IBMVNIC_INIT_FAILED)
1794                         break;
1795
1796                 rwi = get_next_rwi(adapter);
1797         }
1798
1799         if (adapter->wait_for_reset) {
1800                 adapter->wait_for_reset = false;
1801                 adapter->reset_done_rc = rc;
1802                 complete(&adapter->reset_done);
1803         }
1804
1805         if (rc) {
1806                 netdev_dbg(adapter->netdev, "Reset failed\n");
1807                 free_all_rwi(adapter);
1808                 mutex_unlock(&adapter->reset_lock);
1809                 return;
1810         }
1811
1812         adapter->resetting = false;
1813         mutex_unlock(&adapter->reset_lock);
1814 }
1815
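/* Queue a reset work item for the given reason and schedule the reset
 * worker. A request matching one already on the list is dropped.
 */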
1816 static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
1817                           enum ibmvnic_reset_reason reason)
1818 {
1819         struct ibmvnic_rwi *rwi, *tmp;
1820         struct net_device *netdev = adapter->netdev;
1821         struct list_head *entry;
1822
1823         if (adapter->state == VNIC_REMOVING ||
1824             adapter->state == VNIC_REMOVED) {
1825                 netdev_dbg(netdev, "Adapter removing, skipping reset\n");
1826                 return;
1827         }
1828
1829         if (adapter->state == VNIC_PROBING) {
1830                 netdev_warn(netdev, "Adapter reset during probe\n");
1831                 adapter->init_done_rc = EAGAIN;
1832                 return;
1833         }
1834
1835         mutex_lock(&adapter->rwi_lock);
1836
1837         list_for_each(entry, &adapter->rwi_list) {
1838                 tmp = list_entry(entry, struct ibmvnic_rwi, list);
1839                 if (tmp->reset_reason == reason) {
1840                         netdev_dbg(netdev, "Skipping matching reset\n");
1841                         mutex_unlock(&adapter->rwi_lock);
1842                         return;
1843                 }
1844         }
1845
1846         rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
1847         if (!rwi) {
1848                 mutex_unlock(&adapter->rwi_lock);
1849                 ibmvnic_close(netdev);
1850                 return;
1851         }
1852
1853         rwi->reset_reason = reason;
1854         list_add_tail(&rwi->list, &adapter->rwi_list);
1855         mutex_unlock(&adapter->rwi_lock);
1856
1857         netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
1858         schedule_work(&adapter->ibmvnic_reset);
1859 }
1860
1861 static void ibmvnic_tx_timeout(struct net_device *dev)
1862 {
1863         struct ibmvnic_adapter *adapter = netdev_priv(dev);
1864
1865         ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
1866 }
1867
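/* Return an rx buffer's index to its pool's free map and decrement the
 * pool's available count.
 */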
1868 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
1869                                   struct ibmvnic_rx_buff *rx_buff)
1870 {
1871         struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
1872
1873         rx_buff->skb = NULL;
1874
1875         pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
1876         pool->next_alloc = (pool->next_alloc + 1) % pool->size;
1877
1878         atomic_dec(&pool->available);
1879 }
1880
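/* NAPI poll: drain rx completions from this queue's sub-CRQ, copy each
 * received frame into its skb and hand it to the stack with
 * napi_gro_receive(), then replenish the rx pool.
 */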
1881 static int ibmvnic_poll(struct napi_struct *napi, int budget)
1882 {
1883         struct net_device *netdev = napi->dev;
1884         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1885         int scrq_num = (int)(napi - adapter->napi);
1886         int frames_processed = 0;
1887
1888 restart_poll:
1889         while (frames_processed < budget) {
1890                 struct sk_buff *skb;
1891                 struct ibmvnic_rx_buff *rx_buff;
1892                 union sub_crq *next;
1893                 u32 length;
1894                 u16 offset;
1895                 u8 flags = 0;
1896
1897                 if (unlikely(adapter->resetting &&
1898                              adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
1899                         enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1900                         napi_complete_done(napi, frames_processed);
1901                         return frames_processed;
1902                 }
1903
1904                 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
1905                         break;
1906                 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
1907                 rx_buff =
1908                     (struct ibmvnic_rx_buff *)be64_to_cpu(next->
1909                                                           rx_comp.correlator);
1910                 /* do error checking */
1911                 if (next->rx_comp.rc) {
1912                         netdev_dbg(netdev, "rx buffer returned with rc %x\n",
1913                                    be16_to_cpu(next->rx_comp.rc));
1914                         /* free the entry */
1915                         next->rx_comp.first = 0;
1916                         dev_kfree_skb_any(rx_buff->skb);
1917                         remove_buff_from_pool(adapter, rx_buff);
1918                         continue;
1919                 } else if (!rx_buff->skb) {
1920                         /* free the entry */
1921                         next->rx_comp.first = 0;
1922                         remove_buff_from_pool(adapter, rx_buff);
1923                         continue;
1924                 }
1925
1926                 length = be32_to_cpu(next->rx_comp.len);
1927                 offset = be16_to_cpu(next->rx_comp.off_frame_data);
1928                 flags = next->rx_comp.flags;
1929                 skb = rx_buff->skb;
1930                 skb_copy_to_linear_data(skb, rx_buff->data + offset,
1931                                         length);
1932
1933                 /* VLAN Header has been stripped by the system firmware and
1934                  * needs to be inserted by the driver
1935                  */
1936                 if (adapter->rx_vlan_header_insertion &&
1937                     (flags & IBMVNIC_VLAN_STRIPPED))
1938                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1939                                                ntohs(next->rx_comp.vlan_tci));
1940
1941                 /* free the entry */
1942                 next->rx_comp.first = 0;
1943                 remove_buff_from_pool(adapter, rx_buff);
1944
1945                 skb_put(skb, length);
1946                 skb->protocol = eth_type_trans(skb, netdev);
1947                 skb_record_rx_queue(skb, scrq_num);
1948
1949                 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
1950                     flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
1951                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1952                 }
1953
1954                 length = skb->len;
1955                 napi_gro_receive(napi, skb); /* send it up */
1956                 netdev->stats.rx_packets++;
1957                 netdev->stats.rx_bytes += length;
1958                 adapter->rx_stats_buffers[scrq_num].packets++;
1959                 adapter->rx_stats_buffers[scrq_num].bytes += length;
1960                 frames_processed++;
1961         }
1962
1963         if (adapter->state != VNIC_CLOSING)
1964                 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
1965
1966         if (frames_processed < budget) {
1967                 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1968                 napi_complete_done(napi, frames_processed);
1969                 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
1970                     napi_reschedule(napi)) {
1971                         disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1972                         goto restart_poll;
1973                 }
1974         }
1975         return frames_processed;
1976 }
1977
1978 #ifdef CONFIG_NET_POLL_CONTROLLER
1979 static void ibmvnic_netpoll_controller(struct net_device *dev)
1980 {
1981         struct ibmvnic_adapter *adapter = netdev_priv(dev);
1982         int i;
1983
1984         replenish_pools(netdev_priv(dev));
1985         for (i = 0; i < adapter->req_rx_queues; i++)
1986                 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
1987                                      adapter->rx_scrq[i]);
1988 }
1989 #endif
1990
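/* Trigger a CHANGE_PARAM reset and wait for it to complete. If it fails,
 * restore the saved fallback settings and reset once more.
 */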
1991 static int wait_for_reset(struct ibmvnic_adapter *adapter)
1992 {
1993         adapter->fallback.mtu = adapter->req_mtu;
1994         adapter->fallback.rx_queues = adapter->req_rx_queues;
1995         adapter->fallback.tx_queues = adapter->req_tx_queues;
1996         adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
1997         adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
1998
1999         init_completion(&adapter->reset_done);
2000         ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2001         adapter->wait_for_reset = true;
2002         wait_for_completion(&adapter->reset_done);
2003
2004         if (adapter->reset_done_rc) {
2005                 adapter->desired.mtu = adapter->fallback.mtu;
2006                 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2007                 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2008                 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2009                 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2010
2011                 init_completion(&adapter->reset_done);
2012                 ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2013                 wait_for_completion(&adapter->reset_done);
2014         }
2015         adapter->wait_for_reset = false;
2016
2017         return adapter->reset_done_rc;
2018 }
2019
2020 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2021 {
2022         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2023
2024         adapter->desired.mtu = new_mtu + ETH_HLEN;
2025
2026         return wait_for_reset(adapter);
2027 }
2028
2029 static const struct net_device_ops ibmvnic_netdev_ops = {
2030         .ndo_open               = ibmvnic_open,
2031         .ndo_stop               = ibmvnic_close,
2032         .ndo_start_xmit         = ibmvnic_xmit,
2033         .ndo_set_rx_mode        = ibmvnic_set_multi,
2034         .ndo_set_mac_address    = ibmvnic_set_mac,
2035         .ndo_validate_addr      = eth_validate_addr,
2036         .ndo_tx_timeout         = ibmvnic_tx_timeout,
2037 #ifdef CONFIG_NET_POLL_CONTROLLER
2038         .ndo_poll_controller    = ibmvnic_netpoll_controller,
2039 #endif
2040         .ndo_change_mtu         = ibmvnic_change_mtu,
2041 };
2042
2043 /* ethtool functions */
2044
2045 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2046                                       struct ethtool_link_ksettings *cmd)
2047 {
2048         u32 supported, advertising;
2049
2050         supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
2051                           SUPPORTED_FIBRE);
2052         advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
2053                             ADVERTISED_FIBRE);
2054         cmd->base.speed = SPEED_1000;
2055         cmd->base.duplex = DUPLEX_FULL;
2056         cmd->base.port = PORT_FIBRE;
2057         cmd->base.phy_address = 0;
2058         cmd->base.autoneg = AUTONEG_ENABLE;
2059
2060         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2061                                                 supported);
2062         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
2063                                                 advertising);
2064
2065         return 0;
2066 }
2067
2068 static void ibmvnic_get_drvinfo(struct net_device *netdev,
2069                                 struct ethtool_drvinfo *info)
2070 {
2071         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2072
2073         strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2074         strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2075         strlcpy(info->fw_version, adapter->fw_version,
2076                 sizeof(info->fw_version));
2077 }
2078
2079 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2080 {
2081         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2082
2083         return adapter->msg_enable;
2084 }
2085
2086 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2087 {
2088         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2089
2090         adapter->msg_enable = data;
2091 }
2092
2093 static u32 ibmvnic_get_link(struct net_device *netdev)
2094 {
2095         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2096
2097         /* Don't need to send a query because we request a logical link up at
2098          * init and then we wait for link state indications
2099          */
2100         return adapter->logical_link_state;
2101 }
2102
2103 static void ibmvnic_get_ringparam(struct net_device *netdev,
2104                                   struct ethtool_ringparam *ring)
2105 {
2106         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2107
2108         ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2109         ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2110         ring->rx_mini_max_pending = 0;
2111         ring->rx_jumbo_max_pending = 0;
2112         ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2113         ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2114         ring->rx_mini_pending = 0;
2115         ring->rx_jumbo_pending = 0;
2116 }
2117
2118 static int ibmvnic_set_ringparam(struct net_device *netdev,
2119                                  struct ethtool_ringparam *ring)
2120 {
2121         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2122
2123         if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq  ||
2124             ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
2125                 netdev_err(netdev, "Invalid request.\n");
2126                 netdev_err(netdev, "Max tx buffers = %llu\n",
2127                            adapter->max_tx_entries_per_subcrq);
2128                 netdev_err(netdev, "Max rx buffers = %llu\n",
2129                            adapter->max_rx_add_entries_per_subcrq);
2130                 return -EINVAL;
2131         }
2132
2133         adapter->desired.rx_entries = ring->rx_pending;
2134         adapter->desired.tx_entries = ring->tx_pending;
2135
2136         return wait_for_reset(adapter);
2137 }
2138
2139 static void ibmvnic_get_channels(struct net_device *netdev,
2140                                  struct ethtool_channels *channels)
2141 {
2142         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2143
2144         channels->max_rx = adapter->max_rx_queues;
2145         channels->max_tx = adapter->max_tx_queues;
2146         channels->max_other = 0;
2147         channels->max_combined = 0;
2148         channels->rx_count = adapter->req_rx_queues;
2149         channels->tx_count = adapter->req_tx_queues;
2150         channels->other_count = 0;
2151         channels->combined_count = 0;
2152 }
2153
2154 static int ibmvnic_set_channels(struct net_device *netdev,
2155                                 struct ethtool_channels *channels)
2156 {
2157         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2158
2159         adapter->desired.rx_queues = channels->rx_count;
2160         adapter->desired.tx_queues = channels->tx_count;
2161
2162         return wait_for_reset(adapter);
2163 }
2164
2165 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2166 {
2167         struct ibmvnic_adapter *adapter = netdev_priv(dev);
2168         int i;
2169
2170         if (stringset != ETH_SS_STATS)
2171                 return;
2172
2173         for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
2174                 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2175
2176         for (i = 0; i < adapter->req_tx_queues; i++) {
2177                 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2178                 data += ETH_GSTRING_LEN;
2179
2180                 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2181                 data += ETH_GSTRING_LEN;
2182
2183                 snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
2184                 data += ETH_GSTRING_LEN;
2185         }
2186
2187         for (i = 0; i < adapter->req_rx_queues; i++) {
2188                 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2189                 data += ETH_GSTRING_LEN;
2190
2191                 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2192                 data += ETH_GSTRING_LEN;
2193
2194                 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2195                 data += ETH_GSTRING_LEN;
2196         }
2197 }
2198
2199 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2200 {
2201         struct ibmvnic_adapter *adapter = netdev_priv(dev);
2202
2203         switch (sset) {
2204         case ETH_SS_STATS:
2205                 return ARRAY_SIZE(ibmvnic_stats) +
2206                        adapter->req_tx_queues * NUM_TX_STATS +
2207                        adapter->req_rx_queues * NUM_RX_STATS;
2208         default:
2209                 return -EOPNOTSUPP;
2210         }
2211 }
2212
2213 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2214                                       struct ethtool_stats *stats, u64 *data)
2215 {
2216         struct ibmvnic_adapter *adapter = netdev_priv(dev);
2217         union ibmvnic_crq crq;
2218         int i, j;
2219
2220         memset(&crq, 0, sizeof(crq));
2221         crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2222         crq.request_statistics.cmd = REQUEST_STATISTICS;
2223         crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2224         crq.request_statistics.len =
2225             cpu_to_be32(sizeof(struct ibmvnic_statistics));
2226
2227         /* Wait for data to be written */
2228         init_completion(&adapter->stats_done);
2229         ibmvnic_send_crq(adapter, &crq);
2230         wait_for_completion(&adapter->stats_done);
2231
2232         for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2233                 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2234                                                 ibmvnic_stats[i].offset));
2235
2236         for (j = 0; j < adapter->req_tx_queues; j++) {
2237                 data[i] = adapter->tx_stats_buffers[j].packets;
2238                 i++;
2239                 data[i] = adapter->tx_stats_buffers[j].bytes;
2240                 i++;
2241                 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2242                 i++;
2243         }
2244
2245         for (j = 0; j < adapter->req_rx_queues; j++) {
2246                 data[i] = adapter->rx_stats_buffers[j].packets;
2247                 i++;
2248                 data[i] = adapter->rx_stats_buffers[j].bytes;
2249                 i++;
2250                 data[i] = adapter->rx_stats_buffers[j].interrupts;
2251                 i++;
2252         }
2253 }
2254
2255 static const struct ethtool_ops ibmvnic_ethtool_ops = {
2256         .get_drvinfo            = ibmvnic_get_drvinfo,
2257         .get_msglevel           = ibmvnic_get_msglevel,
2258         .set_msglevel           = ibmvnic_set_msglevel,
2259         .get_link               = ibmvnic_get_link,
2260         .get_ringparam          = ibmvnic_get_ringparam,
2261         .set_ringparam          = ibmvnic_set_ringparam,
2262         .get_channels           = ibmvnic_get_channels,
2263         .set_channels           = ibmvnic_set_channels,
2264         .get_strings            = ibmvnic_get_strings,
2265         .get_sset_count         = ibmvnic_get_sset_count,
2266         .get_ethtool_stats      = ibmvnic_get_ethtool_stats,
2267         .get_link_ksettings     = ibmvnic_get_link_ksettings,
2268 };
2269
2270 /* Routines for managing CRQs/sCRQs  */
2271
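/* Tear down a sub-CRQ's irq, clear its message queue, and re-register the
 * queue with the hypervisor.
 */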
2272 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2273                                    struct ibmvnic_sub_crq_queue *scrq)
2274 {
2275         int rc;
2276
2277         if (scrq->irq) {
2278                 free_irq(scrq->irq, scrq);
2279                 irq_dispose_mapping(scrq->irq);
2280                 scrq->irq = 0;
2281         }
2282
2283         memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2284         scrq->cur = 0;
2285
2286         rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2287                            4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2288         return rc;
2289 }
2290
2291 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2292 {
2293         int i, rc;
2294
2295         for (i = 0; i < adapter->req_tx_queues; i++) {
2296                 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
2297                 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2298                 if (rc)
2299                         return rc;
2300         }
2301
2302         for (i = 0; i < adapter->req_rx_queues; i++) {
2303                 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
2304                 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2305                 if (rc)
2306                         return rc;
2307         }
2308
2309         return rc;
2310 }
2311
2312 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
2313                                   struct ibmvnic_sub_crq_queue *scrq,
2314                                   bool do_h_free)
2315 {
2316         struct device *dev = &adapter->vdev->dev;
2317         long rc;
2318
2319         netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2320
2321         if (do_h_free) {
2322                 /* Close the sub-crqs */
2323                 do {
2324                         rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2325                                                 adapter->vdev->unit_address,
2326                                                 scrq->crq_num);
2327                 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
2328
2329                 if (rc) {
2330                         netdev_err(adapter->netdev,
2331                                    "Failed to release sub-CRQ %16lx, rc = %ld\n",
2332                                    scrq->crq_num, rc);
2333                 }
2334         }
2335
2336         dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2337                          DMA_BIDIRECTIONAL);
2338         free_pages((unsigned long)scrq->msgs, 2);
2339         kfree(scrq);
2340 }
2341
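/* Allocate a four-page message queue for a new sub-CRQ, DMA map it, and
 * register it with the hypervisor.
 */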
2342 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2343                                                         *adapter)
2344 {
2345         struct device *dev = &adapter->vdev->dev;
2346         struct ibmvnic_sub_crq_queue *scrq;
2347         int rc;
2348
2349         scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
2350         if (!scrq)
2351                 return NULL;
2352
2353         scrq->msgs =
2354                 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
2355         if (!scrq->msgs) {
2356                 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2357                 goto zero_page_failed;
2358         }
2359
2360         scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2361                                          DMA_BIDIRECTIONAL);
2362         if (dma_mapping_error(dev, scrq->msg_token)) {
2363                 dev_warn(dev, "Couldn't map crq queue messages page\n");
2364                 goto map_failed;
2365         }
2366
2367         rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2368                            4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2369
2370         if (rc == H_RESOURCE)
2371                 rc = ibmvnic_reset_crq(adapter);
2372
2373         if (rc == H_CLOSED) {
2374                 dev_warn(dev, "Partner adapter not ready, waiting.\n");
2375         } else if (rc) {
2376                 dev_warn(dev, "Error %d registering sub-crq\n", rc);
2377                 goto reg_failed;
2378         }
2379
2380         scrq->adapter = adapter;
2381         scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2382         spin_lock_init(&scrq->lock);
2383
2384         netdev_dbg(adapter->netdev,
2385                    "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2386                    scrq->crq_num, scrq->hw_irq, scrq->irq);
2387
2388         return scrq;
2389
2390 reg_failed:
2391         dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2392                          DMA_BIDIRECTIONAL);
2393 map_failed:
2394         free_pages((unsigned long)scrq->msgs, 2);
2395 zero_page_failed:
2396         kfree(scrq);
2397
2398         return NULL;
2399 }
2400
2401 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
2402 {
2403         int i;
2404
2405         if (adapter->tx_scrq) {
2406                 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
2407                         if (!adapter->tx_scrq[i])
2408                                 continue;
2409
2410                         netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
2411                                    i);
2412                         if (adapter->tx_scrq[i]->irq) {
2413                                 free_irq(adapter->tx_scrq[i]->irq,
2414                                          adapter->tx_scrq[i]);
2415                                 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
2416                                 adapter->tx_scrq[i]->irq = 0;
2417                         }
2418
2419                         release_sub_crq_queue(adapter, adapter->tx_scrq[i],
2420                                               do_h_free);
2421                 }
2422
2423                 kfree(adapter->tx_scrq);
2424                 adapter->tx_scrq = NULL;
2425                 adapter->num_active_tx_scrqs = 0;
2426         }
2427
2428         if (adapter->rx_scrq) {
2429                 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
2430                         if (!adapter->rx_scrq[i])
2431                                 continue;
2432
2433                         netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
2434                                    i);
2435                         if (adapter->rx_scrq[i]->irq) {
2436                                 free_irq(adapter->rx_scrq[i]->irq,
2437                                          adapter->rx_scrq[i]);
2438                                 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
2439                                 adapter->rx_scrq[i]->irq = 0;
2440                         }
2441
2442                         release_sub_crq_queue(adapter, adapter->rx_scrq[i],
2443                                               do_h_free);
2444                 }
2445
2446                 kfree(adapter->rx_scrq);
2447                 adapter->rx_scrq = NULL;
2448                 adapter->num_active_rx_scrqs = 0;
2449         }
2450 }
2451
2452 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
2453                             struct ibmvnic_sub_crq_queue *scrq)
2454 {
2455         struct device *dev = &adapter->vdev->dev;
2456         unsigned long rc;
2457
2458         rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2459                                 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2460         if (rc)
2461                 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
2462                         scrq->hw_irq, rc);
2463         return rc;
2464 }
2465
2466 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2467                            struct ibmvnic_sub_crq_queue *scrq)
2468 {
2469         struct device *dev = &adapter->vdev->dev;
2470         unsigned long rc;
2471
2472         if (scrq->hw_irq > 0x100000000ULL) {
2473                 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2474                 return 1;
2475         }
2476
2477         rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2478                                 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2479         if (rc)
2480                 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
2481                         scrq->hw_irq, rc);
2482         return rc;
2483 }
2484
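/* Process tx completions on a sub-CRQ: return completed tx buffers to the
 * pool, free their skbs, and wake the subqueue once enough descriptors are
 * available again.
 */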
2485 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2486                                struct ibmvnic_sub_crq_queue *scrq)
2487 {
2488         struct device *dev = &adapter->vdev->dev;
2489         struct ibmvnic_tx_buff *txbuff;
2490         union sub_crq *next;
2491         int index;
2492         int i, j;
2493         u8 first;
2494
2495 restart_loop:
2496         while (pending_scrq(adapter, scrq)) {
2497                 unsigned int pool = scrq->pool_index;
2498                 int num_entries = 0;
2499
2500                 next = ibmvnic_next_scrq(adapter, scrq);
2501                 for (i = 0; i < next->tx_comp.num_comps; i++) {
2502                         if (next->tx_comp.rcs[i]) {
2503                                 dev_err(dev, "tx error %x\n",
2504                                         next->tx_comp.rcs[i]);
2505                                 continue;
2506                         }
2507                         index = be32_to_cpu(next->tx_comp.correlators[i]);
2508                         txbuff = &adapter->tx_pool[pool].tx_buff[index];
2509
2510                         for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
2511                                 if (!txbuff->data_dma[j])
2512                                         continue;
2513
2514                                 txbuff->data_dma[j] = 0;
2515                         }
2516                         /* if sub_crq was sent indirectly */
2517                         first = txbuff->indir_arr[0].generic.first;
2518                         if (first == IBMVNIC_CRQ_CMD) {
2519                                 dma_unmap_single(dev, txbuff->indir_dma,
2520                                                  sizeof(txbuff->indir_arr),
2521                                                  DMA_TO_DEVICE);
2522                         }
2523
2524                         if (txbuff->last_frag) {
2525                                 dev_kfree_skb_any(txbuff->skb);
2526                                 txbuff->skb = NULL;
2527                         }
2528
2529                         num_entries += txbuff->num_entries;
2530
2531                         adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
2532                                                      producer_index] = index;
2533                         adapter->tx_pool[pool].producer_index =
2534                             (adapter->tx_pool[pool].producer_index + 1) %
2535                             adapter->req_tx_entries_per_subcrq;
2536                 }
2537                 /* remove tx_comp scrq */
2538                 next->tx_comp.first = 0;
2539
2540                 if (atomic_sub_return(num_entries, &scrq->used) <=
2541                     (adapter->req_tx_entries_per_subcrq / 2) &&
2542                     __netif_subqueue_stopped(adapter->netdev,
2543                                              scrq->pool_index)) {
2544                         netif_wake_subqueue(adapter->netdev, scrq->pool_index);
2545                         netdev_dbg(adapter->netdev, "Started queue %d\n",
2546                                    scrq->pool_index);
2547                 }
2548         }
2549
2550         enable_scrq_irq(adapter, scrq);
2551
2552         if (pending_scrq(adapter, scrq)) {
2553                 disable_scrq_irq(adapter, scrq);
2554                 goto restart_loop;
2555         }
2556
2557         return 0;
2558 }
2559
2560 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
2561 {
2562         struct ibmvnic_sub_crq_queue *scrq = instance;
2563         struct ibmvnic_adapter *adapter = scrq->adapter;
2564
2565         disable_scrq_irq(adapter, scrq);
2566         ibmvnic_complete_tx(adapter, scrq);
2567
2568         return IRQ_HANDLED;
2569 }
2570
2571 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
2572 {
2573         struct ibmvnic_sub_crq_queue *scrq = instance;
2574         struct ibmvnic_adapter *adapter = scrq->adapter;
2575
2576         /* When booting a kdump kernel we can hit pending interrupts
2577          * prior to completing driver initialization.
2578          */
2579         if (unlikely(adapter->state != VNIC_OPEN))
2580                 return IRQ_NONE;
2581
2582         adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
2583
2584         if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
2585                 disable_scrq_irq(adapter, scrq);
2586                 __napi_schedule(&adapter->napi[scrq->scrq_num]);
2587         }
2588
2589         return IRQ_HANDLED;
2590 }
2591
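/* Create irq mappings and request interrupt handlers for every tx and rx
 * sub-CRQ.
 */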
2592 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
2593 {
2594         struct device *dev = &adapter->vdev->dev;
2595         struct ibmvnic_sub_crq_queue *scrq;
2596         int i = 0, j = 0;
2597         int rc = 0;
2598
2599         for (i = 0; i < adapter->req_tx_queues; i++) {
2600                 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
2601                            i);
2602                 scrq = adapter->tx_scrq[i];
2603                 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2604
2605                 if (!scrq->irq) {
2606                         rc = -EINVAL;
2607                         dev_err(dev, "Error mapping irq\n");
2608                         goto req_tx_irq_failed;
2609                 }
2610
2611                 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
2612                                  0, "ibmvnic_tx", scrq);
2613
2614                 if (rc) {
2615                         dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
2616                                 scrq->irq, rc);
2617                         irq_dispose_mapping(scrq->irq);
2618                         goto req_tx_irq_failed;
2619                 }
2620         }
2621
2622         for (i = 0; i < adapter->req_rx_queues; i++) {
2623                 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
2624                            i);
2625                 scrq = adapter->rx_scrq[i];
2626                 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2627                 if (!scrq->irq) {
2628                         rc = -EINVAL;
2629                         dev_err(dev, "Error mapping irq\n");
2630                         goto req_rx_irq_failed;
2631                 }
2632                 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
2633                                  0, "ibmvnic_rx", scrq);
2634                 if (rc) {
2635                         dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
2636                                 scrq->irq, rc);
2637                         irq_dispose_mapping(scrq->irq);
2638                         goto req_rx_irq_failed;
2639                 }
2640         }
2641         return rc;
2642
2643 req_rx_irq_failed:
2644         for (j = 0; j < i; j++) {
2645                 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
2646                 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
2647         }
2648         i = adapter->req_tx_queues;
2649 req_tx_irq_failed:
2650         for (j = 0; j < i; j++) {
2651                 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
2652                 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
2653         }
2654         release_sub_crqs(adapter, 1);
2655         return rc;
2656 }
2657
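/* Allocate and register all tx and rx sub-CRQs. If fewer queues than
 * requested could be registered, trim the requested counts (never below
 * the minimums) before assigning the queues.
 */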
2658 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
2659 {
2660         struct device *dev = &adapter->vdev->dev;
2661         struct ibmvnic_sub_crq_queue **allqueues;
2662         int registered_queues = 0;
2663         int total_queues;
2664         int more = 0;
2665         int i;
2666
2667         total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
2668
2669         allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
2670         if (!allqueues)
2671                 return -1;
2672
2673         for (i = 0; i < total_queues; i++) {
2674                 allqueues[i] = init_sub_crq_queue(adapter);
2675                 if (!allqueues[i]) {
2676                         dev_warn(dev, "Couldn't allocate all sub-crqs\n");
2677                         break;
2678                 }
2679                 registered_queues++;
2680         }
2681
2682         /* Make sure we were able to register the minimum number of queues */
2683         if (registered_queues <
2684             adapter->min_tx_queues + adapter->min_rx_queues) {
2685                 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
2686                 goto tx_failed;
2687         }
2688
2689         /* Reduce the requested queue counts to cover the allocation shortfall */
2690         for (i = 0; i < total_queues - registered_queues + more; i++) {
2691                 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
2692                 switch (i % 3) {
2693                 case 0:
2694                         if (adapter->req_rx_queues > adapter->min_rx_queues)
2695                                 adapter->req_rx_queues--;
2696                         else
2697                                 more++;
2698                         break;
2699                 case 1:
2700                         if (adapter->req_tx_queues > adapter->min_tx_queues)
2701                                 adapter->req_tx_queues--;
2702                         else
2703                                 more++;
2704                         break;
2705                 }
2706         }
2707
2708         adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
2709                                    sizeof(*adapter->tx_scrq), GFP_KERNEL);
2710         if (!adapter->tx_scrq)
2711                 goto tx_failed;
2712
2713         for (i = 0; i < adapter->req_tx_queues; i++) {
2714                 adapter->tx_scrq[i] = allqueues[i];
2715                 adapter->tx_scrq[i]->pool_index = i;
2716                 adapter->num_active_tx_scrqs++;
2717         }
2718
2719         adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
2720                                    sizeof(*adapter->rx_scrq), GFP_KERNEL);
2721         if (!adapter->rx_scrq)
2722                 goto rx_failed;
2723
2724         for (i = 0; i < adapter->req_rx_queues; i++) {
2725                 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
2726                 adapter->rx_scrq[i]->scrq_num = i;
2727                 adapter->num_active_rx_scrqs++;
2728         }
2729
2730         kfree(allqueues);
2731         return 0;
2732
2733 rx_failed:
2734         kfree(adapter->tx_scrq);
2735         adapter->tx_scrq = NULL;
2736 tx_failed:
2737         for (i = 0; i < registered_queues; i++)
2738                 release_sub_crq_queue(adapter, allqueues[i], 1);
2739         kfree(allqueues);
2740         return -1;
2741 }
2742
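/* Derive the requested capability values (queue counts, ring sizes, MTU)
 * from the desired settings and advertised limits (skipped on retry), then
 * send one REQUEST_CAPABILITY CRQ per capability.
 */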
2743 static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
2744 {
2745         struct device *dev = &adapter->vdev->dev;
2746         union ibmvnic_crq crq;
2747         int max_entries;
2748
2749         if (!retry) {
2750                 /* Sub-CRQ entries are 32 bytes long */
2751                 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
2752
2753                 if (adapter->min_tx_entries_per_subcrq > entries_page ||
2754                     adapter->min_rx_add_entries_per_subcrq > entries_page) {
2755                         dev_err(dev, "Fatal, invalid entries per sub-crq\n");
2756                         return;
2757                 }
2758
2759                 if (adapter->desired.mtu)
2760                         adapter->req_mtu = adapter->desired.mtu;
2761                 else
2762                         adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
2763
2764                 if (!adapter->desired.tx_entries)
2765                         adapter->desired.tx_entries =
2766                                         adapter->max_tx_entries_per_subcrq;
2767                 if (!adapter->desired.rx_entries)
2768                         adapter->desired.rx_entries =
2769                                         adapter->max_rx_add_entries_per_subcrq;
2770
2771                 max_entries = IBMVNIC_MAX_LTB_SIZE /
2772                               (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
2773
2774                 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
2775                         adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
2776                         adapter->desired.tx_entries = max_entries;
2777                 }
2778
2779                 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
2780                         adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
2781                         adapter->desired.rx_entries = max_entries;
2782                 }
2783
2784                 if (adapter->desired.tx_entries)
2785                         adapter->req_tx_entries_per_subcrq =
2786                                         adapter->desired.tx_entries;
2787                 else
2788                         adapter->req_tx_entries_per_subcrq =
2789                                         adapter->max_tx_entries_per_subcrq;
2790
2791                 if (adapter->desired.rx_entries)
2792                         adapter->req_rx_add_entries_per_subcrq =
2793                                         adapter->desired.rx_entries;
2794                 else
2795                         adapter->req_rx_add_entries_per_subcrq =
2796                                         adapter->max_rx_add_entries_per_subcrq;
2797
2798                 if (adapter->desired.tx_queues)
2799                         adapter->req_tx_queues =
2800                                         adapter->desired.tx_queues;
2801                 else
2802                         adapter->req_tx_queues =
2803                                         adapter->opt_tx_comp_sub_queues;
2804
2805                 if (adapter->desired.rx_queues)
2806                         adapter->req_rx_queues =
2807                                         adapter->desired.rx_queues;
2808                 else
2809                         adapter->req_rx_queues =
2810                                         adapter->opt_rx_comp_queues;
2811
2812                 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
2813         }
2814
2815         memset(&crq, 0, sizeof(crq));
2816         crq.request_capability.first = IBMVNIC_CRQ_CMD;
2817         crq.request_capability.cmd = REQUEST_CAPABILITY;
2818
2819         crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
2820         crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
2821         atomic_inc(&adapter->running_cap_crqs);
2822         ibmvnic_send_crq(adapter, &crq);
2823
2824         crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
2825         crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
2826         atomic_inc(&adapter->running_cap_crqs);
2827         ibmvnic_send_crq(adapter, &crq);
2828
2829         crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
2830         crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
2831         atomic_inc(&adapter->running_cap_crqs);
2832         ibmvnic_send_crq(adapter, &crq);
2833
2834         crq.request_capability.capability =
2835             cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
2836         crq.request_capability.number =
2837             cpu_to_be64(adapter->req_tx_entries_per_subcrq);
2838         atomic_inc(&adapter->running_cap_crqs);
2839         ibmvnic_send_crq(adapter, &crq);
2840
2841         crq.request_capability.capability =
2842             cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
2843         crq.request_capability.number =
2844             cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
2845         atomic_inc(&adapter->running_cap_crqs);
2846         ibmvnic_send_crq(adapter, &crq);
2847
2848         crq.request_capability.capability = cpu_to_be16(REQ_MTU);
2849         crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
2850         atomic_inc(&adapter->running_cap_crqs);
2851         ibmvnic_send_crq(adapter, &crq);
2852
2853         if (adapter->netdev->flags & IFF_PROMISC) {
2854                 if (adapter->promisc_supported) {
2855                         crq.request_capability.capability =
2856                             cpu_to_be16(PROMISC_REQUESTED);
2857                         crq.request_capability.number = cpu_to_be64(1);
2858                         atomic_inc(&adapter->running_cap_crqs);
2859                         ibmvnic_send_crq(adapter, &crq);
2860                 }
2861         } else {
2862                 crq.request_capability.capability =
2863                     cpu_to_be16(PROMISC_REQUESTED);
2864                 crq.request_capability.number = cpu_to_be64(0);
2865                 atomic_inc(&adapter->running_cap_crqs);
2866                 ibmvnic_send_crq(adapter, &crq);
2867         }
2868 }
2869
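/* Return 1 if the entry at the sub-CRQ's current cursor has its valid
 * (CMD_RSP) bit set, i.e. a completion from the server is waiting;
 * return 0 otherwise.
 */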
2870 static int pending_scrq(struct ibmvnic_adapter *adapter,
2871                         struct ibmvnic_sub_crq_queue *scrq)
2872 {
2873         union sub_crq *entry = &scrq->msgs[scrq->cur];
2874
2875         if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
2876                 return 1;
2877         else
2878                 return 0;
2879 }
2880
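/* Return the next valid entry from the sub-CRQ and advance the cursor,
 * wrapping at the end of the ring. Returns NULL if no entry is pending.
 * The queue lock is held while the cursor is inspected and updated.
 */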
2881 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
2882                                         struct ibmvnic_sub_crq_queue *scrq)
2883 {
2884         union sub_crq *entry;
2885         unsigned long flags;
2886
2887         spin_lock_irqsave(&scrq->lock, flags);
2888         entry = &scrq->msgs[scrq->cur];
2889         if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
2890                 if (++scrq->cur == scrq->size)
2891                         scrq->cur = 0;
2892         } else {
2893                 entry = NULL;
2894         }
2895         spin_unlock_irqrestore(&scrq->lock, flags);
2896
2897         return entry;
2898 }
2899
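/* Return the next valid entry from the main CRQ and advance the cursor,
 * or NULL if no response is pending.
 */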
2900 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
2901 {
2902         struct ibmvnic_crq_queue *queue = &adapter->crq;
2903         union ibmvnic_crq *crq;
2904
2905         crq = &queue->msgs[queue->cur];
2906         if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
2907                 if (++queue->cur == queue->size)
2908                         queue->cur = 0;
2909         } else {
2910                 crq = NULL;
2911         }
2912
2913         return crq;
2914 }
2915
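/* Hand one sub-CRQ descriptor to the hypervisor with H_SEND_SUB_CRQ,
 * addressed to the server-side queue identified by remote_handle.
 */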
2916 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
2917                        union sub_crq *sub_crq)
2918 {
2919         unsigned int ua = adapter->vdev->unit_address;
2920         struct device *dev = &adapter->vdev->dev;
2921         u64 *u64_crq = (u64 *)sub_crq;
2922         int rc;
2923
2924         netdev_dbg(adapter->netdev,
2925                    "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
2926                    (unsigned long int)cpu_to_be64(remote_handle),
2927                    (unsigned long int)cpu_to_be64(u64_crq[0]),
2928                    (unsigned long int)cpu_to_be64(u64_crq[1]),
2929                    (unsigned long int)cpu_to_be64(u64_crq[2]),
2930                    (unsigned long int)cpu_to_be64(u64_crq[3]));
2931
2932         /* Make sure the hypervisor sees the complete request */
2933         mb();
2934
2935         rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
2936                                 cpu_to_be64(remote_handle),
2937                                 cpu_to_be64(u64_crq[0]),
2938                                 cpu_to_be64(u64_crq[1]),
2939                                 cpu_to_be64(u64_crq[2]),
2940                                 cpu_to_be64(u64_crq[3]));
2941
2942         if (rc) {
2943                 if (rc == H_CLOSED)
2944                         dev_warn(dev, "CRQ Queue closed\n");
2945                 dev_err(dev, "Send error (rc=%d)\n", rc);
2946         }
2947
2948         return rc;
2949 }
2950
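/* Submit a batch of descriptors with a single H_SEND_SUB_CRQ_INDIRECT
 * hypercall; ioba is the DMA address of the descriptor list and
 * num_entries its length.
 */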
2951 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
2952                                 u64 remote_handle, u64 ioba, u64 num_entries)
2953 {
2954         unsigned int ua = adapter->vdev->unit_address;
2955         struct device *dev = &adapter->vdev->dev;
2956         int rc;
2957
2958         /* Make sure the hypervisor sees the complete request */
2959         mb();
2960         rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
2961                                 cpu_to_be64(remote_handle),
2962                                 ioba, num_entries);
2963
2964         if (rc) {
2965                 if (rc == H_CLOSED)
2966                         dev_warn(dev, "CRQ Queue closed\n");
2967                 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
2968         }
2969
2970         return rc;
2971 }
2972
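/* Post a single command on the main CRQ via H_SEND_CRQ. If the
 * hypervisor reports the queue closed while a reset is already in
 * progress, escalate to a fatal reset.
 */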
2973 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
2974                             union ibmvnic_crq *crq)
2975 {
2976         unsigned int ua = adapter->vdev->unit_address;
2977         struct device *dev = &adapter->vdev->dev;
2978         u64 *u64_crq = (u64 *)crq;
2979         int rc;
2980
2981         netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
2982                    (unsigned long int)cpu_to_be64(u64_crq[0]),
2983                    (unsigned long int)cpu_to_be64(u64_crq[1]));
2984
2985         /* Make sure the hypervisor sees the complete request */
2986         mb();
2987
2988         rc = plpar_hcall_norets(H_SEND_CRQ, ua,
2989                                 cpu_to_be64(u64_crq[0]),
2990                                 cpu_to_be64(u64_crq[1]));
2991
2992         if (rc) {
2993                 if (rc == H_CLOSED) {
2994                         dev_warn(dev, "CRQ Queue closed\n");
2995                         if (adapter->resetting)
2996                                 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
2997                 }
2998
2999                 dev_warn(dev, "Send error (rc=%d)\n", rc);
3000         }
3001
3002         return rc;
3003 }
3004
3005 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3006 {
3007         union ibmvnic_crq crq;
3008
3009         memset(&crq, 0, sizeof(crq));
3010         crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3011         crq.generic.cmd = IBMVNIC_CRQ_INIT;
3012         netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3013
3014         return ibmvnic_send_crq(adapter, &crq);
3015 }
3016
3017 static int send_version_xchg(struct ibmvnic_adapter *adapter)
3018 {
3019         union ibmvnic_crq crq;
3020
3021         memset(&crq, 0, sizeof(crq));
3022         crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3023         crq.version_exchange.cmd = VERSION_EXCHANGE;
3024         crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3025
3026         return ibmvnic_send_crq(adapter, &crq);
3027 }
3028
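/* One client data entry in the login buffer: a type byte, a big-endian
 * length, and "name", which marks the start of a variable-length string
 * of "len" bytes.
 */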
3029 struct vnic_login_client_data {
3030         u8      type;
3031         __be16  len;
3032         char    name;
3033 } __packed;
3034
3035 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3036 {
3037         int len;
3038
3039         /* Calculate the amount of buffer space needed for the
3040          * vnic client data in the login buffer. There are four entries,
3041          * OS name, LPAR name, device name, and a null last entry.
3042          */
3043         len = 4 * sizeof(struct vnic_login_client_data);
3044         len += 6; /* strlen("Linux") plus NUL terminator */
3045         len += strlen(utsname()->nodename) + 1;
3046         len += strlen(adapter->netdev->name) + 1;
3047
3048         return len;
3049 }
3050
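/* Populate the client data area of the login buffer with three entries:
 * the OS name ("Linux"), the LPAR nodename, and the net device name.
 */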
3051 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3052                                  struct vnic_login_client_data *vlcd)
3053 {
3054         const char *os_name = "Linux";
3055         int len;
3056
3057         /* Type 1 - LPAR OS */
3058         vlcd->type = 1;
3059         len = strlen(os_name) + 1;
3060         vlcd->len = cpu_to_be16(len);
3061         strncpy(&vlcd->name, os_name, len);
3062         vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);
3063
3064         /* Type 2 - LPAR name */
3065         vlcd->type = 2;
3066         len = strlen(utsname()->nodename) + 1;
3067         vlcd->len = cpu_to_be16(len);
3068         strncpy(&vlcd->name, utsname()->nodename, len);
3069         vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);
3070
3071         /* Type 3 - device name */
3072         vlcd->type = 3;
3073         len = strlen(adapter->netdev->name) + 1;
3074         vlcd->len = cpu_to_be16(len);
3075         strncpy(&vlcd->name, adapter->netdev->name, len);
3076 }
3077
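/* Build and send the LOGIN request: allocate and DMA-map the login and
 * login response buffers, record the CRQ numbers of every TX/RX sub-CRQ,
 * append the client data, then issue the LOGIN CRQ to the server.
 */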
3078 static int send_login(struct ibmvnic_adapter *adapter)
3079 {
3080         struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3081         struct ibmvnic_login_buffer *login_buffer;
3082         struct device *dev = &adapter->vdev->dev;
3083         dma_addr_t rsp_buffer_token;
3084         dma_addr_t buffer_token;
3085         size_t rsp_buffer_size;
3086         union ibmvnic_crq crq;
3087         size_t buffer_size;
3088         __be64 *tx_list_p;
3089         __be64 *rx_list_p;
3090         int client_data_len;
3091         struct vnic_login_client_data *vlcd;
3092         int i;
3093
3094         if (!adapter->tx_scrq || !adapter->rx_scrq) {
3095                 netdev_err(adapter->netdev,
3096                            "RX or TX queues are not allocated, device login failed\n");
3097                 return -1;
3098         }
3099
3100         release_login_rsp_buffer(adapter);
3101         client_data_len = vnic_client_data_len(adapter);
3102
3103         buffer_size =
3104             sizeof(struct ibmvnic_login_buffer) +
3105             sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3106             client_data_len;
3107
3108         login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3109         if (!login_buffer)
3110                 goto buf_alloc_failed;
3111
3112         buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3113                                       DMA_TO_DEVICE);
3114         if (dma_mapping_error(dev, buffer_token)) {
3115                 dev_err(dev, "Couldn't map login buffer\n");
3116                 goto buf_map_failed;
3117         }
3118
3119         rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3120                           sizeof(u64) * adapter->req_tx_queues +
3121                           sizeof(u64) * adapter->req_rx_queues +
3122                           sizeof(u64) * adapter->req_rx_queues +
3123                           sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3124
3125         login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3126         if (!login_rsp_buffer)
3127                 goto buf_rsp_alloc_failed;
3128
3129         rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3130                                           rsp_buffer_size, DMA_FROM_DEVICE);
3131         if (dma_mapping_error(dev, rsp_buffer_token)) {
3132                 dev_err(dev, "Couldn't map login rsp buffer\n");
3133                 goto buf_rsp_map_failed;
3134         }
3135
3136         adapter->login_buf = login_buffer;
3137         adapter->login_buf_token = buffer_token;
3138         adapter->login_buf_sz = buffer_size;
3139         adapter->login_rsp_buf = login_rsp_buffer;
3140         adapter->login_rsp_buf_token = rsp_buffer_token;
3141         adapter->login_rsp_buf_sz = rsp_buffer_size;
3142
3143         login_buffer->len = cpu_to_be32(buffer_size);
3144         login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3145         login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3146         login_buffer->off_txcomp_subcrqs =
3147             cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3148         login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3149         login_buffer->off_rxcomp_subcrqs =
3150             cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3151                         sizeof(u64) * adapter->req_tx_queues);
3152         login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3153         login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3154
3155         tx_list_p = (__be64 *)((char *)login_buffer +
3156                                       sizeof(struct ibmvnic_login_buffer));
3157         rx_list_p = (__be64 *)((char *)login_buffer +
3158                                       sizeof(struct ibmvnic_login_buffer) +
3159                                       sizeof(u64) * adapter->req_tx_queues);
3160
3161         for (i = 0; i < adapter->req_tx_queues; i++) {
3162                 if (adapter->tx_scrq[i]) {
3163                         tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3164                                                    crq_num);
3165                 }
3166         }
3167
3168         for (i = 0; i < adapter->req_rx_queues; i++) {
3169                 if (adapter->rx_scrq[i]) {
3170                         rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3171                                                    crq_num);
3172                 }
3173         }
3174
3175         /* Insert vNIC login client data */
3176         vlcd = (struct vnic_login_client_data *)
3177                 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3178         login_buffer->client_data_offset =
3179                         cpu_to_be32((char *)vlcd - (char *)login_buffer);
3180         login_buffer->client_data_len = cpu_to_be32(client_data_len);
3181
3182         vnic_add_client_data(adapter, vlcd);
3183
3184         netdev_dbg(adapter->netdev, "Login Buffer:\n");
3185         for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3186                 netdev_dbg(adapter->netdev, "%016lx\n",
3187                            ((unsigned long int *)(adapter->login_buf))[i]);
3188         }
3189
3190         memset(&crq, 0, sizeof(crq));
3191         crq.login.first = IBMVNIC_CRQ_CMD;
3192         crq.login.cmd = LOGIN;
3193         crq.login.ioba = cpu_to_be32(buffer_token);
3194         crq.login.len = cpu_to_be32(buffer_size);
3195         ibmvnic_send_crq(adapter, &crq);
3196
3197         return 0;
3198
3199 buf_rsp_map_failed:
3200         kfree(login_rsp_buffer);
3201 buf_rsp_alloc_failed:
3202         dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3203 buf_map_failed:
3204         kfree(login_buffer);
3205 buf_alloc_failed:
3206         return -1;
3207 }
3208
3209 static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3210                              u32 len, u8 map_id)
3211 {
3212         union ibmvnic_crq crq;
3213
3214         memset(&crq, 0, sizeof(crq));
3215         crq.request_map.first = IBMVNIC_CRQ_CMD;
3216         crq.request_map.cmd = REQUEST_MAP;
3217         crq.request_map.map_id = map_id;
3218         crq.request_map.ioba = cpu_to_be32(addr);
3219         crq.request_map.len = cpu_to_be32(len);
3220         ibmvnic_send_crq(adapter, &crq);
3221 }
3222
3223 static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3224 {
3225         union ibmvnic_crq crq;
3226
3227         memset(&crq, 0, sizeof(crq));
3228         crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3229         crq.request_unmap.cmd = REQUEST_UNMAP;
3230         crq.request_unmap.map_id = map_id;
3231         ibmvnic_send_crq(adapter, &crq);
3232 }
3233
3234 static void send_map_query(struct ibmvnic_adapter *adapter)
3235 {
3236         union ibmvnic_crq crq;
3237
3238         memset(&crq, 0, sizeof(crq));
3239         crq.query_map.first = IBMVNIC_CRQ_CMD;
3240         crq.query_map.cmd = QUERY_MAP;
3241         ibmvnic_send_crq(adapter, &crq);
3242 }
3243
3244 /* Send a series of CRQs requesting various capabilities of the VNIC server */
3245 static void send_cap_queries(struct ibmvnic_adapter *adapter)
3246 {
3247         union ibmvnic_crq crq;
3248
3249         atomic_set(&adapter->running_cap_crqs, 0);
3250         memset(&crq, 0, sizeof(crq));
3251         crq.query_capability.first = IBMVNIC_CRQ_CMD;
3252         crq.query_capability.cmd = QUERY_CAPABILITY;
3253
3254         crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3255         atomic_inc(&adapter->running_cap_crqs);
3256         ibmvnic_send_crq(adapter, &crq);
3257
3258         crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3259         atomic_inc(&adapter->running_cap_crqs);
3260         ibmvnic_send_crq(adapter, &crq);
3261
3262         crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3263         atomic_inc(&adapter->running_cap_crqs);
3264         ibmvnic_send_crq(adapter, &crq);
3265
3266         crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3267         atomic_inc(&adapter->running_cap_crqs);
3268         ibmvnic_send_crq(adapter, &crq);
3269
3270         crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
3271         atomic_inc(&adapter->running_cap_crqs);
3272         ibmvnic_send_crq(adapter, &crq);
3273
3274         crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
3275         atomic_inc(&adapter->running_cap_crqs);
3276         ibmvnic_send_crq(adapter, &crq);
3277
3278         crq.query_capability.capability =
3279             cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
3280         atomic_inc(&adapter->running_cap_crqs);
3281         ibmvnic_send_crq(adapter, &crq);
3282
3283         crq.query_capability.capability =
3284             cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
3285         atomic_inc(&adapter->running_cap_crqs);
3286         ibmvnic_send_crq(adapter, &crq);
3287
3288         crq.query_capability.capability =
3289             cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
3290         atomic_inc(&adapter->running_cap_crqs);
3291         ibmvnic_send_crq(adapter, &crq);
3292
3293         crq.query_capability.capability =
3294             cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
3295         atomic_inc(&adapter->running_cap_crqs);
3296         ibmvnic_send_crq(adapter, &crq);
3297
3298         crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
3299         atomic_inc(&adapter->running_cap_crqs);
3300         ibmvnic_send_crq(adapter, &crq);
3301
3302         crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
3303         atomic_inc(&adapter->running_cap_crqs);
3304         ibmvnic_send_crq(adapter, &crq);
3305
3306         crq.query_capability.capability = cpu_to_be16(MIN_MTU);
3307         atomic_inc(&adapter->running_cap_crqs);
3308         ibmvnic_send_crq(adapter, &crq);
3309
3310         crq.query_capability.capability = cpu_to_be16(MAX_MTU);
3311         atomic_inc(&adapter->running_cap_crqs);
3312         ibmvnic_send_crq(adapter, &crq);
3313
3314         crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
3315         atomic_inc(&adapter->running_cap_crqs);
3316         ibmvnic_send_crq(adapter, &crq);
3317
3318         crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
3319         atomic_inc(&adapter->running_cap_crqs);
3320         ibmvnic_send_crq(adapter, &crq);
3321
3322         crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3323         atomic_inc(&adapter->running_cap_crqs);
3324         ibmvnic_send_crq(adapter, &crq);
3325
3326         crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
3327         atomic_inc(&adapter->running_cap_crqs);
3328         ibmvnic_send_crq(adapter, &crq);
3329
3330         crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
3331         atomic_inc(&adapter->running_cap_crqs);
3332         ibmvnic_send_crq(adapter, &crq);
3333
3334         crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
3335         atomic_inc(&adapter->running_cap_crqs);
3336         ibmvnic_send_crq(adapter, &crq);
3337
3338         crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
3339         atomic_inc(&adapter->running_cap_crqs);
3340         ibmvnic_send_crq(adapter, &crq);
3341
3342         crq.query_capability.capability =
3343                         cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
3344         atomic_inc(&adapter->running_cap_crqs);
3345         ibmvnic_send_crq(adapter, &crq);
3346
3347         crq.query_capability.capability =
3348                         cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
3349         atomic_inc(&adapter->running_cap_crqs);
3350         ibmvnic_send_crq(adapter, &crq);
3351
3352         crq.query_capability.capability =
3353                         cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
3354         atomic_inc(&adapter->running_cap_crqs);
3355         ibmvnic_send_crq(adapter, &crq);
3356
3357         crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
3358         atomic_inc(&adapter->running_cap_crqs);
3359         ibmvnic_send_crq(adapter, &crq);
3360 }
3361
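/* Store the VPD buffer length reported by the server and wake the
 * waiter on fw_done.
 */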
3362 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
3363                                 struct ibmvnic_adapter *adapter)
3364 {
3365         struct device *dev = &adapter->vdev->dev;
3366
3367         if (crq->get_vpd_size_rsp.rc.code) {
3368                 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
3369                         crq->get_vpd_size_rsp.rc.code);
3370                 complete(&adapter->fw_done);
3371                 return;
3372         }
3373
3374         adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
3375         complete(&adapter->fw_done);
3376 }
3377
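/* Parse the returned VPD data and extract the firmware level string that
 * follows the "RM" keyword, bounds-checking against the buffer length.
 * Falls back to "N/A" if no firmware level can be found.
 */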
3378 static void handle_vpd_rsp(union ibmvnic_crq *crq,
3379                            struct ibmvnic_adapter *adapter)
3380 {
3381         struct device *dev = &adapter->vdev->dev;
3382         unsigned char *substr = NULL;
3383         u8 fw_level_len = 0;
3384
3385         memset(adapter->fw_version, 0, 32);
3386
3387         dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
3388                          DMA_FROM_DEVICE);
3389
3390         if (crq->get_vpd_rsp.rc.code) {
3391                 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
3392                         crq->get_vpd_rsp.rc.code);
3393                 goto complete;
3394         }
3395
3396         /* get the position of the firmware version info
3397          * located after the ASCII 'RM' substring in the buffer
3398          */
3399         substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
3400         if (!substr) {
3401                 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
3402                 goto complete;
3403         }
3404
3405         /* get length of firmware level ASCII substring */
3406         if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
3407                 fw_level_len = *(substr + 2);
3408         } else {
3409                 dev_info(dev, "Length of FW substr extends past end of VPD buffer\n");
3410                 goto complete;
3411         }
3412
3413         /* copy firmware version string from vpd into adapter */
3414         if ((substr + 3 + fw_level_len) <
3415             (adapter->vpd->buff + adapter->vpd->len)) {
3416                 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
3417         } else {
3418                 dev_info(dev, "FW substr extends past end of VPD buffer\n");
3419         }
3420
3421 complete:
3422         if (adapter->fw_version[0] == '\0')
3423                 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
3424         complete(&adapter->fw_done);
3425 }
3426
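/* Process the IP offload query response: log the capabilities the server
 * advertises, enable the matching netdev features, and send
 * CONTROL_IP_OFFLOAD to accept the checksum and TSO offloads supported.
 */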
3427 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3428 {
3429         struct device *dev = &adapter->vdev->dev;
3430         struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
3431         union ibmvnic_crq crq;
3432         int i;
3433
3434         dma_unmap_single(dev, adapter->ip_offload_tok,
3435                          sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
3436
3437         netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
3438         for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
3439                 netdev_dbg(adapter->netdev, "%016lx\n",
3440                            ((unsigned long int *)(buf))[i]);
3441
3442         netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
3443         netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
3444         netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
3445                    buf->tcp_ipv4_chksum);
3446         netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
3447                    buf->tcp_ipv6_chksum);
3448         netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
3449                    buf->udp_ipv4_chksum);
3450         netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
3451                    buf->udp_ipv6_chksum);
3452         netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
3453                    buf->large_tx_ipv4);
3454         netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
3455                    buf->large_tx_ipv6);
3456         netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
3457                    buf->large_rx_ipv4);
3458         netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
3459                    buf->large_rx_ipv6);
3460         netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
3461                    buf->max_ipv4_header_size);
3462         netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
3463                    buf->max_ipv6_header_size);
3464         netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
3465                    buf->max_tcp_header_size);
3466         netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
3467                    buf->max_udp_header_size);
3468         netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
3469                    buf->max_large_tx_size);
3470         netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
3471                    buf->max_large_rx_size);
3472         netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
3473                    buf->ipv6_extension_header);
3474         netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
3475                    buf->tcp_pseudosum_req);
3476         netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
3477                    buf->num_ipv6_ext_headers);
3478         netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
3479                    buf->off_ipv6_ext_headers);
3480
3481         adapter->ip_offload_ctrl_tok =
3482             dma_map_single(dev, &adapter->ip_offload_ctrl,
3483                            sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
3484
3485         if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
3486                 dev_err(dev, "Couldn't map ip offload control buffer\n");
3487                 return;
3488         }
3489
3490         adapter->ip_offload_ctrl.len =
3491             cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3492         adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
3493         adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
3494         adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
3495         adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3496         adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3497         adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
3498         adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
3499         adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
3500         adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
3501
3502         /* large_rx disabled for now, additional features needed */
3503         adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
3504         adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
3505
3506         adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
3507
3508         if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
3509                 adapter->netdev->features |= NETIF_F_IP_CSUM;
3510
3511         if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
3512                 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
3513
3514         if ((adapter->netdev->features &
3515             (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
3516                 adapter->netdev->features |= NETIF_F_RXCSUM;
3517
3518         if (buf->large_tx_ipv4)
3519                 adapter->netdev->features |= NETIF_F_TSO;
3520         if (buf->large_tx_ipv6)
3521                 adapter->netdev->features |= NETIF_F_TSO6;
3522
3523         adapter->netdev->hw_features |= adapter->netdev->features;
3524
3525         memset(&crq, 0, sizeof(crq));
3526         crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
3527         crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
3528         crq.control_ip_offload.len =
3529             cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3530         crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
3531         ibmvnic_send_crq(adapter, &crq);
3532 }
3533
3534 static void handle_error_info_rsp(union ibmvnic_crq *crq,
3535                                   struct ibmvnic_adapter *adapter)
3536 {
3537         struct device *dev = &adapter->vdev->dev;
3538         struct ibmvnic_error_buff *error_buff, *tmp;
3539         unsigned long flags;
3540         bool found = false;
3541         int i;
3542
3543         if (crq->request_error_rsp.rc.code) {
3544                 dev_info(dev, "Request Error Rsp returned with rc=%x\n",
3545                          crq->request_error_rsp.rc.code);
3546                 return;
3547         }
3548
3549         spin_lock_irqsave(&adapter->error_list_lock, flags);
3550         list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
3551                 if (error_buff->error_id == crq->request_error_rsp.error_id) {
3552                         found = true;
3553                         list_del(&error_buff->list);
3554                         break;
3555                 }
3556         spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3557
3558         if (!found) {
3559                 dev_err(dev, "Couldn't find error id %x\n",
3560                         be32_to_cpu(crq->request_error_rsp.error_id));
3561                 return;
3562         }
3563
3564         dev_err(dev, "Detailed info for error id %x:",
3565                 be32_to_cpu(crq->request_error_rsp.error_id));
3566
3567         for (i = 0; i < error_buff->len; i++) {
3568                 pr_cont("%02x", (int)error_buff->buff[i]);
3569                 if (i % 8 == 7)
3570                         pr_cont(" ");
3571         }
3572         pr_cont("\n");
3573
3574         dma_unmap_single(dev, error_buff->dma, error_buff->len,
3575                          DMA_FROM_DEVICE);
3576         kfree(error_buff->buff);
3577         kfree(error_buff);
3578 }
3579
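/* Allocate and DMA-map a buffer for detailed error data, queue it on the
 * adapter's error list, and ask the server to fill it with a
 * REQUEST_ERROR_INFO CRQ, waiting (with a timeout) for the response.
 */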
3580 static void request_error_information(struct ibmvnic_adapter *adapter,
3581                                       union ibmvnic_crq *err_crq)
3582 {
3583         struct device *dev = &adapter->vdev->dev;
3584         struct net_device *netdev = adapter->netdev;
3585         struct ibmvnic_error_buff *error_buff;
3586         unsigned long timeout = msecs_to_jiffies(30000);
3587         union ibmvnic_crq crq;
3588         unsigned long flags;
3589         int rc, detail_len;
3590
3591         error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
3592         if (!error_buff)
3593                 return;
3594
3595         detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
3596         error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
3597         if (!error_buff->buff) {
3598                 kfree(error_buff);
3599                 return;
3600         }
3601
3602         error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
3603                                          DMA_FROM_DEVICE);
3604         if (dma_mapping_error(dev, error_buff->dma)) {
3605                 netdev_err(netdev, "Couldn't map error buffer\n");
3606                 kfree(error_buff->buff);
3607                 kfree(error_buff);
3608                 return;
3609         }
3610
3611         error_buff->len = detail_len;
3612         error_buff->error_id = err_crq->error_indication.error_id;
3613
3614         spin_lock_irqsave(&adapter->error_list_lock, flags);
3615         list_add_tail(&error_buff->list, &adapter->errors);
3616         spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3617
3618         memset(&crq, 0, sizeof(crq));
3619         crq.request_error_info.first = IBMVNIC_CRQ_CMD;
3620         crq.request_error_info.cmd = REQUEST_ERROR_INFO;
3621         crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
3622         crq.request_error_info.len = cpu_to_be32(detail_len);
3623         crq.request_error_info.error_id = err_crq->error_indication.error_id;
3624
3625         rc = ibmvnic_send_crq(adapter, &crq);
3626         if (rc) {
3627                 netdev_err(netdev, "failed to request error information\n");
3628                 goto err_info_fail;
3629         }
3630
3631         if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3632                 netdev_err(netdev, "timeout waiting for error information\n");
3633                 goto err_info_fail;
3634         }
3635
3636         return;
3637
3638 err_info_fail:
3639         spin_lock_irqsave(&adapter->error_list_lock, flags);
3640         list_del(&error_buff->list);
3641         spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3642
3643         kfree(error_buff->buff);
3644         kfree(error_buff);
3645 }
3646
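/* Log an asynchronous error reported by firmware, request detailed error
 * information if an error id was supplied, and schedule a fatal or
 * non-fatal reset depending on the error flags.
 */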
3647 static void handle_error_indication(union ibmvnic_crq *crq,
3648                                     struct ibmvnic_adapter *adapter)
3649 {
3650         struct device *dev = &adapter->vdev->dev;
3651
3652         dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
3653                 crq->error_indication.flags
3654                         & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
3655                 be32_to_cpu(crq->error_indication.error_id),
3656                 be16_to_cpu(crq->error_indication.error_cause));
3657
3658         if (be32_to_cpu(crq->error_indication.error_id))
3659                 request_error_information(adapter, crq);
3660
3661         if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
3662                 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3663         else
3664                 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
3665 }
3666
3667 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
3668                                  struct ibmvnic_adapter *adapter)
3669 {
3670         struct net_device *netdev = adapter->netdev;
3671         struct device *dev = &adapter->vdev->dev;
3672         long rc;
3673
3674         rc = crq->change_mac_addr_rsp.rc.code;
3675         if (rc) {
3676                 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
3677                 goto out;
3678         }
3679         memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
3680                ETH_ALEN);
3681 out:
3682         complete(&adapter->fw_done);
3683         return rc;
3684 }
3685
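/* Handle the response to a REQUEST_CAPABILITY CRQ. On PARTIALSUCCESS the
 * server returns the value it can support, so the request is adjusted
 * (or the MTU reverted to the fallback value) and the capability requests
 * are resent. Once all outstanding requests have completed, query the
 * server's IP offload capabilities.
 */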
3686 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3687                                    struct ibmvnic_adapter *adapter)
3688 {
3689         struct device *dev = &adapter->vdev->dev;
3690         u64 *req_value;
3691         char *name;
3692
3693         atomic_dec(&adapter->running_cap_crqs);
3694         switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
3695         case REQ_TX_QUEUES:
3696                 req_value = &adapter->req_tx_queues;
3697                 name = "tx";
3698                 break;
3699         case REQ_RX_QUEUES:
3700                 req_value = &adapter->req_rx_queues;
3701                 name = "rx";
3702                 break;
3703         case REQ_RX_ADD_QUEUES:
3704                 req_value = &adapter->req_rx_add_queues;
3705                 name = "rx_add";
3706                 break;
3707         case REQ_TX_ENTRIES_PER_SUBCRQ:
3708                 req_value = &adapter->req_tx_entries_per_subcrq;
3709                 name = "tx_entries_per_subcrq";
3710                 break;
3711         case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
3712                 req_value = &adapter->req_rx_add_entries_per_subcrq;
3713                 name = "rx_add_entries_per_subcrq";
3714                 break;
3715         case REQ_MTU:
3716                 req_value = &adapter->req_mtu;
3717                 name = "mtu";
3718                 break;
3719         case PROMISC_REQUESTED:
3720                 req_value = &adapter->promisc;
3721                 name = "promisc";
3722                 break;
3723         default:
3724                 dev_err(dev, "Got invalid cap request rsp %d\n",
3725                         crq->request_capability.capability);
3726                 return;
3727         }
3728
3729         switch (crq->request_capability_rsp.rc.code) {
3730         case SUCCESS:
3731                 break;
3732         case PARTIALSUCCESS:
3733                 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
3734                          *req_value,
3735                          (long int)be64_to_cpu(crq->request_capability_rsp.
3736                                                number), name);
3737
3738                 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
3739                     REQ_MTU) {
3740                         pr_err("mtu of %llu is not supported. Reverting.\n",
3741                                *req_value);
3742                         *req_value = adapter->fallback.mtu;
3743                 } else {
3744                         *req_value =
3745                                 be64_to_cpu(crq->request_capability_rsp.number);
3746                 }
3747
3748                 ibmvnic_send_req_caps(adapter, 1);
3749                 return;
3750         default:
3751                 dev_err(dev, "Error %d in request cap rsp\n",
3752                         crq->request_capability_rsp.rc.code);
3753                 return;
3754         }
3755
3756         /* Done receiving requested capabilities, query IP offload support */
3757         if (atomic_read(&adapter->running_cap_crqs) == 0) {
3758                 union ibmvnic_crq newcrq;
3759                 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
3760                 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
3761                     &adapter->ip_offload_buf;
3762
3763                 adapter->wait_capability = false;
3764                 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
3765                                                          buf_sz,
3766                                                          DMA_FROM_DEVICE);
3767
3768                 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
3769                         if (!firmware_has_feature(FW_FEATURE_CMO))
3770                                 dev_err(dev, "Couldn't map offload buffer\n");
3771                         return;
3772                 }
3773
3774                 memset(&newcrq, 0, sizeof(newcrq));
3775                 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
3776                 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
3777                 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
3778                 newcrq.query_ip_offload.ioba =
3779                     cpu_to_be32(adapter->ip_offload_tok);
3780
3781                 ibmvnic_send_crq(adapter, &newcrq);
3782         }
3783 }
3784
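/* Validate the login response against the login request and complete
 * initialization; if the server rejected the requested queue counts,
 * flag the adapter for renegotiation instead.
 */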
3785 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
3786                             struct ibmvnic_adapter *adapter)
3787 {
3788         struct device *dev = &adapter->vdev->dev;
3789         struct net_device *netdev = adapter->netdev;
3790         struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
3791         struct ibmvnic_login_buffer *login = adapter->login_buf;
3792         int i;
3793
3794         dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
3795                          DMA_BIDIRECTIONAL);
3796         dma_unmap_single(dev, adapter->login_rsp_buf_token,
3797                          adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
3798
3799         /* If the number of queues requested can't be allocated by the
3800          * server, the login response will return with code 1. We will need
3801          * to resend the login buffer with fewer queues requested.
3802          */
3803         if (login_rsp_crq->generic.rc.code) {
3804                 adapter->renegotiate = true;
3805                 complete(&adapter->init_done);
3806                 return 0;
3807         }
3808
3809         netdev->mtu = adapter->req_mtu - ETH_HLEN;
3810
3811         netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
3812         for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
3813                 netdev_dbg(adapter->netdev, "%016lx\n",
3814                            ((unsigned long int *)(adapter->login_rsp_buf))[i]);
3815         }
3816
3817         /* Sanity checks */
3818         if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
3819             (be32_to_cpu(login->num_rxcomp_subcrqs) *
3820              adapter->req_rx_add_queues !=
3821              be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
3822                 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
3823                 ibmvnic_remove(adapter->vdev);
3824                 return -EIO;
3825         }
3826         release_login_buffer(adapter);
3827         complete(&adapter->init_done);
3828
3829         return 0;
3830 }
3831
3832 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
3833                                      struct ibmvnic_adapter *adapter)
3834 {
3835         struct device *dev = &adapter->vdev->dev;
3836         long rc;
3837
3838         rc = crq->request_unmap_rsp.rc.code;
3839         if (rc)
3840                 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
3841 }
3842
3843 static void handle_query_map_rsp(union ibmvnic_crq *crq,
3844                                  struct ibmvnic_adapter *adapter)
3845 {
3846         struct net_device *netdev = adapter->netdev;
3847         struct device *dev = &adapter->vdev->dev;
3848         long rc;
3849
3850         rc = crq->query_map_rsp.rc.code;
3851         if (rc) {
3852                 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
3853                 return;
3854         }
3855         netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
3856                    crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
3857                    crq->query_map_rsp.free_pages);
3858 }
3859
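/* Store each capability value reported by the server; when the last
 * outstanding query completes, send the capability requests.
 */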
3860 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
3861                                  struct ibmvnic_adapter *adapter)
3862 {
3863         struct net_device *netdev = adapter->netdev;
3864         struct device *dev = &adapter->vdev->dev;
3865         long rc;
3866
3867         atomic_dec(&adapter->running_cap_crqs);
3868         netdev_dbg(netdev, "Outstanding queries: %d\n",
3869                    atomic_read(&adapter->running_cap_crqs));
3870         rc = crq->query_capability.rc.code;
3871         if (rc) {
3872                 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
3873                 goto out;
3874         }
3875
3876         switch (be16_to_cpu(crq->query_capability.capability)) {
3877         case MIN_TX_QUEUES:
3878                 adapter->min_tx_queues =
3879                     be64_to_cpu(crq->query_capability.number);
3880                 netdev_dbg(netdev, "min_tx_queues = %lld\n",
3881                            adapter->min_tx_queues);
3882                 break;
3883         case MIN_RX_QUEUES:
3884                 adapter->min_rx_queues =
3885                     be64_to_cpu(crq->query_capability.number);
3886                 netdev_dbg(netdev, "min_rx_queues = %lld\n",
3887                            adapter->min_rx_queues);
3888                 break;
3889         case MIN_RX_ADD_QUEUES:
3890                 adapter->min_rx_add_queues =
3891                     be64_to_cpu(crq->query_capability.number);
3892                 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
3893                            adapter->min_rx_add_queues);
3894                 break;
3895         case MAX_TX_QUEUES:
3896                 adapter->max_tx_queues =
3897                     be64_to_cpu(crq->query_capability.number);
3898                 netdev_dbg(netdev, "max_tx_queues = %lld\n",
3899                            adapter->max_tx_queues);
3900                 break;
3901         case MAX_RX_QUEUES:
3902                 adapter->max_rx_queues =
3903                     be64_to_cpu(crq->query_capability.number);
3904                 netdev_dbg(netdev, "max_rx_queues = %lld\n",
3905                            adapter->max_rx_queues);
3906                 break;
3907         case MAX_RX_ADD_QUEUES:
3908                 adapter->max_rx_add_queues =
3909                     be64_to_cpu(crq->query_capability.number);
3910                 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
3911                            adapter->max_rx_add_queues);
3912                 break;
3913         case MIN_TX_ENTRIES_PER_SUBCRQ:
3914                 adapter->min_tx_entries_per_subcrq =
3915                     be64_to_cpu(crq->query_capability.number);
3916                 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
3917                            adapter->min_tx_entries_per_subcrq);
3918                 break;
3919         case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
3920                 adapter->min_rx_add_entries_per_subcrq =
3921                     be64_to_cpu(crq->query_capability.number);
3922                 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
3923                            adapter->min_rx_add_entries_per_subcrq);
3924                 break;
3925         case MAX_TX_ENTRIES_PER_SUBCRQ:
3926                 adapter->max_tx_entries_per_subcrq =
3927                     be64_to_cpu(crq->query_capability.number);
3928                 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
3929                            adapter->max_tx_entries_per_subcrq);
3930                 break;
3931         case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
3932                 adapter->max_rx_add_entries_per_subcrq =
3933                     be64_to_cpu(crq->query_capability.number);
3934                 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
3935                            adapter->max_rx_add_entries_per_subcrq);
3936                 break;
3937         case TCP_IP_OFFLOAD:
3938                 adapter->tcp_ip_offload =
3939                     be64_to_cpu(crq->query_capability.number);
3940                 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
3941                            adapter->tcp_ip_offload);
3942                 break;
3943         case PROMISC_SUPPORTED:
3944                 adapter->promisc_supported =
3945                     be64_to_cpu(crq->query_capability.number);
3946                 netdev_dbg(netdev, "promisc_supported = %lld\n",
3947                            adapter->promisc_supported);
3948                 break;
3949         case MIN_MTU:
3950                 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
3951                 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
3952                 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
3953                 break;
3954         case MAX_MTU:
3955                 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
3956                 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
3957                 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
3958                 break;
3959         case MAX_MULTICAST_FILTERS:
3960                 adapter->max_multicast_filters =
3961                     be64_to_cpu(crq->query_capability.number);
3962                 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
3963                            adapter->max_multicast_filters);
3964                 break;
3965         case VLAN_HEADER_INSERTION:
3966                 adapter->vlan_header_insertion =
3967                     be64_to_cpu(crq->query_capability.number);
3968                 if (adapter->vlan_header_insertion)
3969                         netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
3970                 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
3971                            adapter->vlan_header_insertion);
3972                 break;
3973         case RX_VLAN_HEADER_INSERTION:
3974                 adapter->rx_vlan_header_insertion =
3975                     be64_to_cpu(crq->query_capability.number);
3976                 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
3977                            adapter->rx_vlan_header_insertion);
3978                 break;
3979         case MAX_TX_SG_ENTRIES:
3980                 adapter->max_tx_sg_entries =
3981                     be64_to_cpu(crq->query_capability.number);
3982                 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
3983                            adapter->max_tx_sg_entries);
3984                 break;
3985         case RX_SG_SUPPORTED:
3986                 adapter->rx_sg_supported =
3987                     be64_to_cpu(crq->query_capability.number);
3988                 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
3989                            adapter->rx_sg_supported);
3990                 break;
3991         case OPT_TX_COMP_SUB_QUEUES:
3992                 adapter->opt_tx_comp_sub_queues =
3993                     be64_to_cpu(crq->query_capability.number);
3994                 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
3995                            adapter->opt_tx_comp_sub_queues);
3996                 break;
3997         case OPT_RX_COMP_QUEUES:
3998                 adapter->opt_rx_comp_queues =
3999                     be64_to_cpu(crq->query_capability.number);
4000                 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4001                            adapter->opt_rx_comp_queues);
4002                 break;
4003         case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4004                 adapter->opt_rx_bufadd_q_per_rx_comp_q =
4005                     be64_to_cpu(crq->query_capability.number);
4006                 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4007                            adapter->opt_rx_bufadd_q_per_rx_comp_q);
4008                 break;
4009         case OPT_TX_ENTRIES_PER_SUBCRQ:
4010                 adapter->opt_tx_entries_per_subcrq =
4011                     be64_to_cpu(crq->query_capability.number);
4012                 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4013                            adapter->opt_tx_entries_per_subcrq);
4014                 break;
4015         case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4016                 adapter->opt_rxba_entries_per_subcrq =
4017                     be64_to_cpu(crq->query_capability.number);
4018                 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4019                            adapter->opt_rxba_entries_per_subcrq);
4020                 break;
4021         case TX_RX_DESC_REQ:
4022                 adapter->tx_rx_desc_req = crq->query_capability.number;
4023                 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4024                            adapter->tx_rx_desc_req);
4025                 break;
4026
4027         default:
4028                 netdev_err(netdev, "Got invalid cap rsp %d\n",
4029                            crq->query_capability.capability);
4030         }
4031
4032 out:
4033         if (atomic_read(&adapter->running_cap_crqs) == 0) {
4034                 adapter->wait_capability = false;
4035                 ibmvnic_send_req_caps(adapter, 0);
4036         }
4037 }
4038
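/* Top-level dispatcher for incoming CRQ messages: handles init and
 * transport events directly and routes command responses to their
 * specific handlers.
 */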
4039 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
4040                                struct ibmvnic_adapter *adapter)
4041 {
4042         struct ibmvnic_generic_crq *gen_crq = &crq->generic;
4043         struct net_device *netdev = adapter->netdev;
4044         struct device *dev = &adapter->vdev->dev;
4045         u64 *u64_crq = (u64 *)crq;
4046         long rc;
4047
4048         netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
4049                    (unsigned long int)cpu_to_be64(u64_crq[0]),
4050                    (unsigned long int)cpu_to_be64(u64_crq[1]));
4051         switch (gen_crq->first) {
4052         case IBMVNIC_CRQ_INIT_RSP:
4053                 switch (gen_crq->cmd) {
4054                 case IBMVNIC_CRQ_INIT:
4055                         dev_info(dev, "Partner initialized\n");
4056                         adapter->from_passive_init = true;
4057                         complete(&adapter->init_done);
4058                         break;
4059                 case IBMVNIC_CRQ_INIT_COMPLETE:
4060                         dev_info(dev, "Partner initialization complete\n");
4061                         send_version_xchg(adapter);
4062                         break;
4063                 default:
4064                         dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
4065                 }
4066                 return;
4067         case IBMVNIC_CRQ_XPORT_EVENT:
4068                 netif_carrier_off(netdev);
4069                 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
4070                         dev_info(dev, "Migrated, re-enabling adapter\n");
4071                         ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
4072                 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
4073                         dev_info(dev, "Backing device failover detected\n");
4074                         ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
4075                 } else {
4076                         /* The adapter lost the connection */
4077                         dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
4078                                 gen_crq->cmd);
4079                         ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4080                 }
4081                 return;
4082         case IBMVNIC_CRQ_CMD_RSP:
4083                 break;
4084         default:
4085                 dev_err(dev, "Got an invalid msg type 0x%02x\n",
4086                         gen_crq->first);
4087                 return;
4088         }
4089
4090         switch (gen_crq->cmd) {
4091         case VERSION_EXCHANGE_RSP:
4092                 rc = crq->version_exchange_rsp.rc.code;
4093                 if (rc) {
4094                         dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
4095                         break;
4096                 }
4097                 dev_info(dev, "Partner protocol version is %d\n",
4098                          be16_to_cpu(crq->version_exchange_rsp.version));
4099                 if (be16_to_cpu(crq->version_exchange_rsp.version) <
4100                     ibmvnic_version)
4101                         ibmvnic_version =
4102                             be16_to_cpu(crq->version_exchange_rsp.version);
4103                 send_cap_queries(adapter);
4104                 break;
4105         case QUERY_CAPABILITY_RSP:
4106                 handle_query_cap_rsp(crq, adapter);
4107                 break;
4108         case QUERY_MAP_RSP:
4109                 handle_query_map_rsp(crq, adapter);
4110                 break;
4111         case REQUEST_MAP_RSP:
4112                 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
4113                 complete(&adapter->fw_done);
4114                 break;
4115         case REQUEST_UNMAP_RSP:
4116                 handle_request_unmap_rsp(crq, adapter);
4117                 break;
4118         case REQUEST_CAPABILITY_RSP:
4119                 handle_request_cap_rsp(crq, adapter);
4120                 break;
4121         case LOGIN_RSP:
4122                 netdev_dbg(netdev, "Got Login Response\n");
4123                 handle_login_rsp(crq, adapter);
4124                 break;
4125         case LOGICAL_LINK_STATE_RSP:
4126                 netdev_dbg(netdev,
4127                            "Got Logical Link State Response, state: %d rc: %d\n",
4128                            crq->logical_link_state_rsp.link_state,
4129                            crq->logical_link_state_rsp.rc.code);
4130                 adapter->logical_link_state =
4131                     crq->logical_link_state_rsp.link_state;
4132                 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
4133                 complete(&adapter->init_done);
4134                 break;
4135         case LINK_STATE_INDICATION:
4136                 netdev_dbg(netdev, "Got Logical Link State Indication\n");
4137                 adapter->phys_link_state =
4138                     crq->link_state_indication.phys_link_state;
4139                 adapter->logical_link_state =
4140                     crq->link_state_indication.logical_link_state;
4141                 break;
4142         case CHANGE_MAC_ADDR_RSP:
4143                 netdev_dbg(netdev, "Got MAC address change Response\n");
4144                 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
4145                 break;
4146         case ERROR_INDICATION:
4147                 netdev_dbg(netdev, "Got Error Indication\n");
4148                 handle_error_indication(crq, adapter);
4149                 break;
4150         case REQUEST_ERROR_RSP:
4151                 netdev_dbg(netdev, "Got Error Detail Response\n");
4152                 handle_error_info_rsp(crq, adapter);
4153                 break;
4154         case REQUEST_STATISTICS_RSP:
4155                 netdev_dbg(netdev, "Got Statistics Response\n");
4156                 complete(&adapter->stats_done);
4157                 break;
4158         case QUERY_IP_OFFLOAD_RSP:
4159                 netdev_dbg(netdev, "Got Query IP offload Response\n");
4160                 handle_query_ip_offload_rsp(adapter);
4161                 break;
4162         case MULTICAST_CTRL_RSP:
4163                 netdev_dbg(netdev, "Got multicast control Response\n");
4164                 break;
4165         case CONTROL_IP_OFFLOAD_RSP:
4166                 netdev_dbg(netdev, "Got Control IP offload Response\n");
4167                 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
4168                                  sizeof(adapter->ip_offload_ctrl),
4169                                  DMA_TO_DEVICE);
4170                 complete(&adapter->init_done);
4171                 break;
4172         case COLLECT_FW_TRACE_RSP:
4173                 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
4174                 complete(&adapter->fw_done);
4175                 break;
4176         case GET_VPD_SIZE_RSP:
4177                 handle_vpd_size_rsp(crq, adapter);
4178                 break;
4179         case GET_VPD_RSP:
4180                 handle_vpd_rsp(crq, adapter);
4181                 break;
4182         default:
4183                 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
4184                            gen_crq->cmd);
4185         }
4186 }
4187
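/* CRQ interrupt handler; all message processing is deferred to the tasklet. */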
4188 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
4189 {
4190         struct ibmvnic_adapter *adapter = instance;
4191
4192         tasklet_schedule(&adapter->tasklet);
4193         return IRQ_HANDLED;
4194 }
4195
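/* Drain all valid messages from the CRQ.  While capability responses are
 * still outstanding, stay in the loop so the capability exchange completes
 * without waiting for another interrupt.
 */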
4196 static void ibmvnic_tasklet(unsigned long data)
4197 {
4198         struct ibmvnic_adapter *adapter = (struct ibmvnic_adapter *)data;
4199         struct ibmvnic_crq_queue *queue = &adapter->crq;
4200         union ibmvnic_crq *crq;
4201         unsigned long flags;
4202         bool done = false;
4203
4204         spin_lock_irqsave(&queue->lock, flags);
4205         while (!done) {
4206                 /* Pull all the valid messages off the CRQ */
4207                 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
4208                         ibmvnic_handle_crq(crq, adapter);
4209                         crq->generic.first = 0;
4210                 }
4211
4212                 /* remain in the tasklet until all
4213                  * capability responses are received
4214                  */
4215                 if (!adapter->wait_capability)
4216                         done = true;
4217         }
4218         /* if capability CRQs were sent in this tasklet, the following
4219          * tasklet run must wait until all responses are received
4220          */
4221         if (atomic_read(&adapter->running_cap_crqs) != 0)
4222                 adapter->wait_capability = true;
4223         spin_unlock_irqrestore(&queue->lock, flags);
4224 }
4225
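/* Ask the hypervisor to re-enable the CRQ, retrying while it reports busy. */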
4226 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
4227 {
4228         struct vio_dev *vdev = adapter->vdev;
4229         int rc;
4230
4231         do {
4232                 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
4233         } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
4234
4235         if (rc)
4236                 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
4237
4238         return rc;
4239 }
4240
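/* Close the CRQ with the hypervisor, clear any stale messages and
 * re-register the same page so communication can resume.
 */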
4241 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
4242 {
4243         struct ibmvnic_crq_queue *crq = &adapter->crq;
4244         struct device *dev = &adapter->vdev->dev;
4245         struct vio_dev *vdev = adapter->vdev;
4246         int rc;
4247
4248         /* Close the CRQ */
4249         do {
4250                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4251         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4252
4253         /* Clean out the queue */
4254         memset(crq->msgs, 0, PAGE_SIZE);
4255         crq->cur = 0;
4256
4257         /* And re-open it again */
4258         rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4259                                 crq->msg_token, PAGE_SIZE);
4260
4261         if (rc == H_CLOSED)
4262                 /* Adapter is good, but other end is not ready */
4263                 dev_warn(dev, "Partner adapter not ready\n");
4264         else if (rc != 0)
4265                 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
4266
4267         return rc;
4268 }
4269
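/* Tear down the CRQ: free the irq, stop the tasklet, close the queue with
 * the hypervisor and release the DMA-mapped message page.
 */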
4270 static void release_crq_queue(struct ibmvnic_adapter *adapter)
4271 {
4272         struct ibmvnic_crq_queue *crq = &adapter->crq;
4273         struct vio_dev *vdev = adapter->vdev;
4274         long rc;
4275
4276         if (!crq->msgs)
4277                 return;
4278
4279         netdev_dbg(adapter->netdev, "Releasing CRQ\n");
4280         free_irq(vdev->irq, adapter);
4281         tasklet_kill(&adapter->tasklet);
4282         do {
4283                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4284         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4285
4286         dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
4287                          DMA_BIDIRECTIONAL);
4288         free_page((unsigned long)crq->msgs);
4289         crq->msgs = NULL;
4290 }
4291
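/* Allocate and DMA-map one page of CRQ messages, register it with the
 * hypervisor (H_REG_CRQ) and set up the interrupt and tasklet that
 * service the queue.
 */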
4292 static int init_crq_queue(struct ibmvnic_adapter *adapter)
4293 {
4294         struct ibmvnic_crq_queue *crq = &adapter->crq;
4295         struct device *dev = &adapter->vdev->dev;
4296         struct vio_dev *vdev = adapter->vdev;
4297         int rc, retrc = -ENOMEM;
4298
4299         if (crq->msgs)
4300                 return 0;
4301
4302         crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
4303         /* Should we allocate more than one page? */
4304
4305         if (!crq->msgs)
4306                 return -ENOMEM;
4307
4308         crq->size = PAGE_SIZE / sizeof(*crq->msgs);
4309         crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
4310                                         DMA_BIDIRECTIONAL);
4311         if (dma_mapping_error(dev, crq->msg_token))
4312                 goto map_failed;
4313
4314         rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4315                                 crq->msg_token, PAGE_SIZE);
4316
4317         if (rc == H_RESOURCE)
4318                 /* maybe kexecing and resource is busy. try a reset */
4319                 rc = ibmvnic_reset_crq(adapter);
4320         retrc = rc;
4321
4322         if (rc == H_CLOSED) {
4323                 dev_warn(dev, "Partner adapter not ready\n");
4324         } else if (rc) {
4325                 dev_warn(dev, "Error %d opening adapter\n", rc);
4326                 goto reg_crq_failed;
4327         }
4328
4329         retrc = 0;
4330
4331         tasklet_init(&adapter->tasklet, ibmvnic_tasklet,
4332                      (unsigned long)adapter);
4333
4334         netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
4335         rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
4336                          adapter);
4337         if (rc) {
4338                 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
4339                         vdev->irq, rc);
4340                 goto req_irq_failed;
4341         }
4342
4343         rc = vio_enable_interrupts(vdev);
4344         if (rc) {
4345                 dev_err(dev, "Error %d enabling interrupts\n", rc);
4346                 goto req_irq_failed;
4347         }
4348
4349         crq->cur = 0;
4350         spin_lock_init(&crq->lock);
4351
4352         return retrc;
4353
4354 req_irq_failed:
4355         tasklet_kill(&adapter->tasklet);
4356         do {
4357                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4358         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4359 reg_crq_failed:
4360         dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
4361 map_failed:
4362         free_page((unsigned long)crq->msgs);
4363         crq->msgs = NULL;
4364         return retrc;
4365 }
4366
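/* Bring up (or reset) the CRQ and perform the initialization handshake
 * with the VNIC server, then set up the sub-CRQs, their irqs and the
 * statistics buffers.
 */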
4367 static int ibmvnic_init(struct ibmvnic_adapter *adapter)
4368 {
4369         struct device *dev = &adapter->vdev->dev;
4370         unsigned long timeout = msecs_to_jiffies(30000);
4371         u64 old_num_rx_queues, old_num_tx_queues;
4372         int rc;
4373
4374         if (adapter->resetting && !adapter->wait_for_reset) {
4375                 rc = ibmvnic_reset_crq(adapter);
4376                 if (!rc)
4377                         rc = vio_enable_interrupts(adapter->vdev);
4378         } else {
4379                 rc = init_crq_queue(adapter);
4380         }
4381
4382         if (rc) {
4383                 dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
4384                 return rc;
4385         }
4386
4387         adapter->from_passive_init = false;
4388
4389         old_num_rx_queues = adapter->req_rx_queues;
4390         old_num_tx_queues = adapter->req_tx_queues;
4391
4392         init_completion(&adapter->init_done);
4393         adapter->init_done_rc = 0;
4394         ibmvnic_send_crq_init(adapter);
4395         if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
4396                 dev_err(dev, "Initialization sequence timed out\n");
4397                 return -1;
4398         }
4399
4400         if (adapter->init_done_rc) {
4401                 release_crq_queue(adapter);
4402                 return adapter->init_done_rc;
4403         }
4404
4405         if (adapter->from_passive_init) {
4406                 adapter->state = VNIC_OPEN;
4407                 adapter->from_passive_init = false;
4408                 return -1;
4409         }
4410
4411         if (adapter->resetting && !adapter->wait_for_reset) {
4412                 if (adapter->req_rx_queues != old_num_rx_queues ||
4413                     adapter->req_tx_queues != old_num_tx_queues) {
4414                         release_sub_crqs(adapter, 0);
4415                         rc = init_sub_crqs(adapter);
4416                 } else {
4417                         rc = reset_sub_crq_queues(adapter);
4418                 }
4419         } else {
4420                 rc = init_sub_crqs(adapter);
4421         }
4422
4423         if (rc) {
4424                 dev_err(dev, "Initialization of sub crqs failed\n");
4425                 release_crq_queue(adapter);
4426                 return rc;
4427         }
4428
4429         rc = init_sub_crq_irqs(adapter);
4430         if (rc) {
4431                 dev_err(dev, "Failed to initialize sub crq irqs\n");
4432                 release_crq_queue(adapter);
                return rc;
4433         }
4434
4435         rc = init_stats_buffers(adapter);
4436         if (rc)
4437                 return rc;
4438
4439         rc = init_stats_token(adapter);
4440         if (rc)
4441                 return rc;
4442
4443         return rc;
4444 }
4445
4446 static struct device_attribute dev_attr_failover;
4447
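/* Probe a VNIC device: read the MAC address from the device tree, allocate
 * the net_device, initialize adapter state and the CRQ protocol, and
 * register the netdev.
 */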
4448 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
4449 {
4450         struct ibmvnic_adapter *adapter;
4451         struct net_device *netdev;
4452         unsigned char *mac_addr_p;
4453         int rc;
4454
4455         dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
4456                 dev->unit_address);
4457
4458         mac_addr_p = (unsigned char *)vio_get_attribute(dev,
4459                                                         VETH_MAC_ADDR, NULL);
4460         if (!mac_addr_p) {
4461                 dev_err(&dev->dev,
4462                         "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
4463                         __FILE__, __LINE__);
4464                 return 0;
4465         }
4466
4467         netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
4468                                    IBMVNIC_MAX_QUEUES);
4469         if (!netdev)
4470                 return -ENOMEM;
4471
4472         adapter = netdev_priv(netdev);
4473         adapter->state = VNIC_PROBING;
4474         dev_set_drvdata(&dev->dev, netdev);
4475         adapter->vdev = dev;
4476         adapter->netdev = netdev;
4477
4478         ether_addr_copy(adapter->mac_addr, mac_addr_p);
4479         ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
4480         netdev->irq = dev->irq;
4481         netdev->netdev_ops = &ibmvnic_netdev_ops;
4482         netdev->ethtool_ops = &ibmvnic_ethtool_ops;
4483         SET_NETDEV_DEV(netdev, &dev->dev);
4484
4485         spin_lock_init(&adapter->stats_lock);
4486
4487         INIT_LIST_HEAD(&adapter->errors);
4488         spin_lock_init(&adapter->error_list_lock);
4489
4490         INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
4491         INIT_LIST_HEAD(&adapter->rwi_list);
4492         mutex_init(&adapter->reset_lock);
4493         mutex_init(&adapter->rwi_lock);
4494         adapter->resetting = false;
4495
4496         adapter->mac_change_pending = false;
4497
4498         do {
4499                 rc = ibmvnic_init(adapter);
4500                 if (rc && rc != EAGAIN)
4501                         goto ibmvnic_init_fail;
4502         } while (rc == EAGAIN);
4503
4504         netdev->mtu = adapter->req_mtu - ETH_HLEN;
4505         netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4506         netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4507
4508         rc = device_create_file(&dev->dev, &dev_attr_failover);
4509         if (rc)
4510                 goto ibmvnic_init_fail;
4511
4512         netif_carrier_off(netdev);
4513         rc = register_netdev(netdev);
4514         if (rc) {
4515                 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
4516                 goto ibmvnic_register_fail;
4517         }
4518         dev_info(&dev->dev, "ibmvnic registered\n");
4519
4520         adapter->state = VNIC_PROBED;
4521
4522         adapter->wait_for_reset = false;
4523
4524         return 0;
4525
4526 ibmvnic_register_fail:
4527         device_remove_file(&dev->dev, &dev_attr_failover);
4528
4529 ibmvnic_init_fail:
4530         release_sub_crqs(adapter, 1);
4531         release_crq_queue(adapter);
4532         free_netdev(netdev);
4533
4534         return rc;
4535 }
4536
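/* Unregister the netdev and release all adapter resources. */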
4537 static int ibmvnic_remove(struct vio_dev *dev)
4538 {
4539         struct net_device *netdev = dev_get_drvdata(&dev->dev);
4540         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
4541
4542         adapter->state = VNIC_REMOVING;
4543         unregister_netdev(netdev);
4544         mutex_lock(&adapter->reset_lock);
4545
4546         release_resources(adapter);
4547         release_sub_crqs(adapter, 1);
4548         release_crq_queue(adapter);
4549
4550         release_stats_token(adapter);
4551         release_stats_buffers(adapter);
4552
4553         adapter->state = VNIC_REMOVED;
4554
4555         mutex_unlock(&adapter->reset_lock);
4556         device_remove_file(&dev->dev, &dev_attr_failover);
4557         free_netdev(netdev);
4558         dev_set_drvdata(&dev->dev, NULL);
4559
4560         return 0;
4561 }
4562
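/* sysfs "failover" attribute: writing 1 retrieves the session token and
 * asks the hypervisor to trigger a client-initiated failover.
 */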
4563 static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
4564                               const char *buf, size_t count)
4565 {
4566         struct net_device *netdev = dev_get_drvdata(dev);
4567         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
4568         unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
4569         __be64 session_token;
4570         long rc;
4571
4572         if (!sysfs_streq(buf, "1"))
4573                 return -EINVAL;
4574
4575         rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
4576                          H_GET_SESSION_TOKEN, 0, 0, 0);
4577         if (rc) {
4578                 netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
4579                            rc);
4580                 return -EINVAL;
4581         }
4582
4583         session_token = (__be64)retbuf[0];
4584         netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
4585                    be64_to_cpu(session_token));
4586         rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
4587                                 H_SESSION_ERR_DETECTED, session_token, 0, 0);
4588         if (rc) {
4589                 netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
4590                            rc);
4591                 return -EINVAL;
4592         }
4593
4594         return count;
4595 }
4596
4597 static DEVICE_ATTR_WO(failover);
4598
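/* Estimate the IO entitlement (DMA space) the device needs: the CRQ page,
 * the statistics buffer, the sub-CRQ message queues and the rx buffer pools.
 */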
4599 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
4600 {
4601         struct net_device *netdev = dev_get_drvdata(&vdev->dev);
4602         struct ibmvnic_adapter *adapter;
4603         struct iommu_table *tbl;
4604         unsigned long ret = 0;
4605         int i;
4606
4607         tbl = get_iommu_table_base(&vdev->dev);
4608
4609         /* netdev inits at probe time along with the structures we need below */
4610         if (!netdev)
4611                 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
4612
4613         adapter = netdev_priv(netdev);
4614
4615         ret += PAGE_SIZE; /* the crq message queue */
4616         ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
4617
4618         for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
4619                 ret += 4 * PAGE_SIZE; /* the scrq message queue */
4620
4621         for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
4622              i++)
4623                 ret += adapter->rx_pool[i].size *
4624                     IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
4625
4626         return ret;
4627 }
4628
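/* PM resume handler: kick the CRQ tasklet if the adapter was open. */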
4629 static int ibmvnic_resume(struct device *dev)
4630 {
4631         struct net_device *netdev = dev_get_drvdata(dev);
4632         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
4633
4634         if (adapter->state != VNIC_OPEN)
4635                 return 0;
4636
4637         tasklet_schedule(&adapter->tasklet);
4638
4639         return 0;
4640 }
4641
4642 static const struct vio_device_id ibmvnic_device_table[] = {
4643         {"network", "IBM,vnic"},
4644         {"", "" }
4645 };
4646 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
4647
4648 static const struct dev_pm_ops ibmvnic_pm_ops = {
4649         .resume = ibmvnic_resume
4650 };
4651
4652 static struct vio_driver ibmvnic_driver = {
4653         .id_table       = ibmvnic_device_table,
4654         .probe          = ibmvnic_probe,
4655         .remove         = ibmvnic_remove,
4656         .get_desired_dma = ibmvnic_get_desired_dma,
4657         .name           = ibmvnic_driver_name,
4658         .pm             = &ibmvnic_pm_ops,
4659 };
4660
4661 /* module functions */
4662 static int __init ibmvnic_module_init(void)
4663 {
4664         pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
4665                 IBMVNIC_DRIVER_VERSION);
4666
4667         return vio_register_driver(&ibmvnic_driver);
4668 }
4669
4670 static void __exit ibmvnic_module_exit(void)
4671 {
4672         vio_unregister_driver(&ibmvnic_driver);
4673 }
4674
4675 module_init(ibmvnic_module_init);
4676 module_exit(ibmvnic_module_exit);