Correct .gbs.conf settings
[platform/adaptation/renesas_rcar/renesas_kernel.git] / drivers / net / ethernet / brocade / bna / bna_enet.c
1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18 #include "bna.h"
19
20 static inline int
21 ethport_can_be_up(struct bna_ethport *ethport)
22 {
23         int ready = 0;
24         if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
25                 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
26                          (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
27                          (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
28         else
29                 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
30                          (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
31                          !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
32         return ready;
33 }
34
/* "Is up" and "can be up" use the identical flag test. */
#define ethport_is_up ethport_can_be_up

/* Events consumed by the bna_ethport state machine. */
enum bna_ethport_event {
	ETHPORT_E_START			= 1,
	ETHPORT_E_STOP			= 2,
	ETHPORT_E_FAIL			= 3,
	ETHPORT_E_UP			= 4,
	ETHPORT_E_DOWN			= 5,
	ETHPORT_E_FWRESP_UP_OK		= 6,
	ETHPORT_E_FWRESP_DOWN		= 7,
	ETHPORT_E_FWRESP_UP_FAIL	= 8,
};
47
/* Events consumed by the bna_enet state machine. */
enum bna_enet_event {
	ENET_E_START			= 1,
	ENET_E_STOP			= 2,
	ENET_E_FAIL			= 3,
	ENET_E_PAUSE_CFG		= 4,
	ENET_E_MTU_CFG			= 5,
	ENET_E_FWRESP_PAUSE		= 6,
	ENET_E_CHLD_STOPPED		= 7,
};
57
/* Events consumed by the bna_ioceth state machine. */
enum bna_ioceth_event {
	IOCETH_E_ENABLE			= 1,
	IOCETH_E_DISABLE		= 2,
	IOCETH_E_IOC_RESET		= 3,
	IOCETH_E_IOC_FAILED		= 4,
	IOCETH_E_IOC_READY		= 5,
	IOCETH_E_ENET_ATTR_RESP		= 6,
	IOCETH_E_ENET_STOPPED		= 7,
	IOCETH_E_IOC_DISABLED		= 8,
};
68
/*
 * Copy one firmware statistics area (_name, laid out as struct
 * bfi_enet_stats_<_type>) from the DMA-able area into the driver's
 * software copy, byte-swapping each 64-bit counter from big endian.
 * Relies on u64 *stats_src, *stats_dst and int count, i being in
 * scope at the expansion site.
 *
 * Fix: dropped the stray line-continuation backslash that followed
 * "} while (0)", which silently spliced the next source line into
 * the macro definition.
 */
#define bna_stats_copy(_name, _type)					\
do {									\
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);	\
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;	\
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;	\
	for (i = 0; i < count; i++)					\
		stats_dst[i] = be64_to_cpu(stats_src[i]);		\
} while (0)
77
78 /*
79  * FW response handlers
80  */
81
/* FW AEN: physical port was enabled; raise E_UP if all conditions hold. */
static void
bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	/* E_UP only fires once admin-up and Rx-started are also set. */
	if (ethport_can_be_up(ethport))
		bfa_fsm_send_event(ethport, ETHPORT_E_UP);
}
91
92 static void
93 bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
94                                 struct bfi_msgq_mhdr *msghdr)
95 {
96         int ethport_up = ethport_is_up(ethport);
97
98         ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
99
100         if (ethport_up)
101                 bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
102 }
103
104 static void
105 bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
106                                 struct bfi_msgq_mhdr *msghdr)
107 {
108         struct bfi_enet_enable_req *admin_req =
109                 &ethport->bfi_enet_cmd.admin_req;
110         struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;
111
112         switch (admin_req->enable) {
113         case BNA_STATUS_T_ENABLED:
114                 if (rsp->error == BFI_ENET_CMD_OK)
115                         bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
116                 else {
117                         ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
118                         bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
119                 }
120                 break;
121
122         case BNA_STATUS_T_DISABLED:
123                 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
124                 ethport->link_status = BNA_LINK_DOWN;
125                 ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
126                 break;
127         }
128 }
129
130 static void
131 bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
132                                 struct bfi_msgq_mhdr *msghdr)
133 {
134         struct bfi_enet_diag_lb_req *diag_lb_req =
135                 &ethport->bfi_enet_cmd.lpbk_req;
136         struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;
137
138         switch (diag_lb_req->enable) {
139         case BNA_STATUS_T_ENABLED:
140                 if (rsp->error == BFI_ENET_CMD_OK)
141                         bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
142                 else {
143                         ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
144                         bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
145                 }
146                 break;
147
148         case BNA_STATUS_T_DISABLED:
149                 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
150                 break;
151         }
152 }
153
/* FW acknowledged a pause configuration; forward it to the enet FSM. */
static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}
159
/* FW response carrying the adapter's enet resource attributes. */
static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr;

	/**
	 * Store only if not set earlier, since BNAD can override the HW
	 * attributes
	 */
	if (!ioceth->attr.fw_query_complete) {
		/*
		 * NOTE(review): both num_txq and num_rxp are taken from
		 * rsp->max_cfg — presumably the FW reports one combined
		 * limit; confirm against the BFI message definition.
		 */
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
		ioceth->attr.fw_query_complete = true;
	}

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}
181
/*
 * FW response to a statistics fetch: copy the DMA'd (big-endian) stats
 * into the software copy, then complete the pending stats request.
 *
 * The FW packs per-Rxf and per-Txf stats contiguously for only the
 * functions present in the request masks, so stats_src advances
 * continuously across both scatter loops below — do not reorder them.
 */
static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i;

	/* Fixed-position blocks first (rlb shares the rad layout). */
	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	bna_stats_copy(rlb, rad);
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Copy Rxf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_rxf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Copy Txf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_txf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Release the in-flight marker before notifying the caller. */
	bna->stats_mod.stats_get_busy = false;
	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}
235
/* FW AEN: link came up; record it and notify the driver layer. */
static void
bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_UP;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
}
245
/* FW AEN: link went down; record it and notify the driver layer. */
static void
bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_DOWN;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
}
255
/* Handle an error interrupt: clear a halt condition, then tell the IOC. */
static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}
264
265 void
266 bna_mbox_handler(struct bna *bna, u32 intr_status)
267 {
268         if (BNA_IS_ERR_INTR(bna, intr_status)) {
269                 bna_err_handler(bna, intr_status);
270                 return;
271         }
272         if (BNA_IS_MBOX_INTR(bna, intr_status))
273                 bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
274 }
275
/*
 * Demultiplex a firmware message-queue response/AEN to its consumer.
 * Rx/Tx-scoped messages are routed by enet_id via the
 * bna_rx_from_rid()/bna_tx_from_rid() lookup macros, which set
 * rx/tx to NULL when no matching object exists (e.g. it was torn
 * down before the response arrived) — hence the NULL checks.
 */
static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	/* Generic Rxf configuration acknowledgements. */
	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	/* Singleton-object responses: ethport, enet, ioceth, stats. */
	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No-op */
		break;

	/* Asynchronous event notifications from the firmware. */
	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		break;
	}
}
385
386 /* ETHPORT */
387
/*
 * Invoke and clear the ethport's pending stop callback, if any.
 * The callback is cleared before the call so re-entry cannot fire
 * it twice; it receives the parent enet object.
 */
#define call_ethport_stop_cbfn(_ethport)				\
do {									\
	if ((_ethport)->stop_cbfn) {					\
		void (*cbfn)(struct bna_enet *);			\
		cbfn = (_ethport)->stop_cbfn;				\
		(_ethport)->stop_cbfn = NULL;				\
		cbfn(&(_ethport)->bna->enet);				\
	}								\
} while (0)
397
/*
 * Invoke and clear the ethport's pending admin-up completion callback,
 * if any, passing the given bna_cb_status to the bnad layer.
 */
#define call_ethport_adminup_cbfn(ethport, status)			\
do {									\
	if ((ethport)->adminup_cbfn) {					\
		void (*cbfn)(struct bnad *, enum bna_cb_status);	\
		cbfn = (ethport)->adminup_cbfn;				\
		(ethport)->adminup_cbfn = NULL;				\
		cbfn((ethport)->bna->bnad, status);			\
	}								\
} while (0)
407
408 static void
409 bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
410 {
411         struct bfi_enet_enable_req *admin_up_req =
412                 &ethport->bfi_enet_cmd.admin_req;
413
414         bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
415                 BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
416         admin_up_req->mh.num_entries = htons(
417                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
418         admin_up_req->enable = BNA_STATUS_T_ENABLED;
419
420         bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
421                 sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
422         bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
423 }
424
425 static void
426 bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
427 {
428         struct bfi_enet_enable_req *admin_down_req =
429                 &ethport->bfi_enet_cmd.admin_req;
430
431         bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
432                 BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
433         admin_down_req->mh.num_entries = htons(
434                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
435         admin_down_req->enable = BNA_STATUS_T_DISABLED;
436
437         bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
438                 sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
439         bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
440 }
441
442 static void
443 bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
444 {
445         struct bfi_enet_diag_lb_req *lpbk_up_req =
446                 &ethport->bfi_enet_cmd.lpbk_req;
447
448         bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
449                 BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
450         lpbk_up_req->mh.num_entries = htons(
451                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
452         lpbk_up_req->mode = (ethport->bna->enet.type ==
453                                 BNA_ENET_T_LOOPBACK_INTERNAL) ?
454                                 BFI_ENET_DIAG_LB_OPMODE_EXT :
455                                 BFI_ENET_DIAG_LB_OPMODE_CBL;
456         lpbk_up_req->enable = BNA_STATUS_T_ENABLED;
457
458         bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
459                 sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
460         bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
461 }
462
463 static void
464 bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
465 {
466         struct bfi_enet_diag_lb_req *lpbk_down_req =
467                 &ethport->bfi_enet_cmd.lpbk_req;
468
469         bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
470                 BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
471         lpbk_down_req->mh.num_entries = htons(
472                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
473         lpbk_down_req->enable = BNA_STATUS_T_DISABLED;
474
475         bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
476                 sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
477         bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
478 }
479
480 static void
481 bna_bfi_ethport_up(struct bna_ethport *ethport)
482 {
483         if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
484                 bna_bfi_ethport_admin_up(ethport);
485         else
486                 bna_bfi_ethport_lpbk_up(ethport);
487 }
488
489 static void
490 bna_bfi_ethport_down(struct bna_ethport *ethport)
491 {
492         if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
493                 bna_bfi_ethport_admin_down(ethport);
494         else
495                 bna_bfi_ethport_lpbk_down(ethport);
496 }
497
/*
 * Ethport state machine states:
 *   stopped        - not started, or failed
 *   down           - started but administratively down
 *   up_resp_wait   - up requested, awaiting FW response
 *   down_resp_wait - down requested, awaiting FW response
 *   up             - port up, FW acknowledged
 *   last_resp_wait - stopping, draining the final FW response
 */
bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
510
/* Entering stopped: complete any pending stop request. */
static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}
516
/* stopped state: waiting for a start; stop/fail/down are benign here. */
static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_STOP:
		/* Already stopped; just acknowledge the caller. */
		call_ethport_stop_cbfn(ethport);
		break;

	case ETHPORT_E_FAIL:
		/* No-op */
		break;

	case ETHPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
543
/* Entering down: nothing to do until an E_UP arrives. */
static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
}
548
/* down state: started but not up; E_UP kicks off the FW up request. */
static void
bna_ethport_sm_down(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
571
/* Entering up_resp_wait: the up request was already posted by the caller. */
static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
}
576
/* up_resp_wait state: an up request is outstanding with the firmware. */
static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/*
		 * This is the FW ack for the earlier down request: we got
		 * here via down_resp_wait -> up_resp_wait on ETHPORT_E_UP,
		 * so re-issue the up request now that the mbox is free.
		 */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
615
/* Entering down_resp_wait: intentionally empty (see note below). */
static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
	/**
	 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * ETHPORT_E_DOWN
	 */
}
625
/* down_resp_wait state: a down request is (or will be) outstanding. */
static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/*
		 * FW ack for the earlier up request (we arrived here via
		 * up_resp_wait -> down_resp_wait on ETHPORT_E_DOWN); the
		 * mbox is free now, so post the deferred down request.
		 */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}
657
/* Entering up: nothing to do; link AENs drive further notifications. */
static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
}
662
/* up state: port is up; stop or down triggers a FW down request. */
static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
686
/* Entering last_resp_wait: just wait out the final FW response. */
static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
}
691
/* last_resp_wait state: stopping; drain the outstanding FW response. */
static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to ethport
		 */
		/* No-op */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on ETHPORT_E_STOP */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
723
724 static void
725 bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
726 {
727         ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
728         ethport->bna = bna;
729
730         ethport->link_status = BNA_LINK_DOWN;
731         ethport->link_cbfn = bnad_cb_ethport_link_status;
732
733         ethport->rx_started_count = 0;
734
735         ethport->stop_cbfn = NULL;
736         ethport->adminup_cbfn = NULL;
737
738         bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
739 }
740
741 static void
742 bna_ethport_uninit(struct bna_ethport *ethport)
743 {
744         ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
745         ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
746
747         ethport->bna = NULL;
748 }
749
/* Kick the ethport FSM out of "stopped". */
static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}
755
/* Stop-completion callback: account the ethport in the enet wait counter. */
static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
761
/* Request ethport stop; completion is reported via stop_cbfn. */
static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}
768
/* IOC failure path: reset port state, report link down, fail the FSM. */
static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset the physical port status to enabled */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	/* Only notify the bnad layer if the link was not already down. */
	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}
781
/* Should be called only when ethport is disabled */
/*
 * An Rx object started: on the 0 -> 1 transition set RX_STARTED and,
 * if all other up-conditions already hold, raise E_UP.
 */
void
bna_ethport_cb_rx_started(struct bna_ethport *ethport)
{
	ethport->rx_started_count++;

	if (ethport->rx_started_count == 1) {
		ethport->flags |= BNA_ETHPORT_F_RX_STARTED;

		if (ethport_can_be_up(ethport))
			bfa_fsm_send_event(ethport, ETHPORT_E_UP);
	}
}
795
796 void
797 bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
798 {
799         int ethport_up = ethport_is_up(ethport);
800
801         ethport->rx_started_count--;
802
803         if (ethport->rx_started_count == 0) {
804                 ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;
805
806                 if (ethport_up)
807                         bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
808         }
809 }
810
811 /* ENET */
812
/*
 * Start all enet children (ethport, Tx mod, Rx mod), mapping the enet
 * type (regular vs loopback) onto the matching Tx/Rx types.
 */
#define bna_enet_chld_start(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_ethport_start(&(enet)->bna->ethport);			\
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);		\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)
825
/*
 * Stop all enet children, tracking completion with a wait counter:
 * one bfa_wc_up() per child before its stop, then bfa_wc_wait() arms
 * bna_enet_cb_chld_stopped() to fire once every child reports back.
 */
#define bna_enet_chld_stop(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_ethport_stop(&(enet)->bna->ethport);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
843
/* Propagate an IOC/enet failure to all child objects (no handshake). */
#define bna_enet_chld_fail(enet)					\
do {									\
	bna_ethport_fail(&(enet)->bna->ethport);			\
	bna_tx_mod_fail(&(enet)->bna->tx_mod);				\
	bna_rx_mod_fail(&(enet)->bna->rx_mod);				\
} while (0)
850
/* Restart only the Rx child (used after an MTU-triggered Rx stop). */
#define bna_enet_rx_start(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)
858
/*
 * Stop only the Rx child; a single wait-counter reference is taken so
 * bna_enet_cb_chld_stopped() runs once Rx reports stopped.
 */
#define bna_enet_rx_stop(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
869
/*
 * Invoke and clear the pending stop callback, if any.  The callback
 * pointers are cleared before the call so the callback may safely
 * re-arm a new stop request.
 */
#define call_enet_stop_cbfn(enet)					\
do {									\
	if ((enet)->stop_cbfn) {					\
		void (*cbfn)(void *);					\
		void *cbarg;						\
		cbfn = (enet)->stop_cbfn;				\
		cbarg = (enet)->stop_cbarg;				\
		(enet)->stop_cbfn = NULL;				\
		(enet)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)
882
/* Invoke and clear the pending pause-config completion callback, if any. */
#define call_enet_pause_cbfn(enet)					\
do {									\
	if ((enet)->pause_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->pause_cbfn;				\
		(enet)->pause_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)
892
/* Invoke and clear the pending MTU-set completion callback, if any. */
#define call_enet_mtu_cbfn(enet)					\
do {									\
	if ((enet)->mtu_cbfn) {						\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->mtu_cbfn;				\
		(enet)->mtu_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)
902
903 static void bna_enet_cb_chld_stopped(void *arg);
904 static void bna_bfi_pause_set(struct bna_enet *enet);
905
906 bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
907                         enum bna_enet_event);
908 bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
909                         enum bna_enet_event);
910 bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
911                         enum bna_enet_event);
912 bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
913                         enum bna_enet_event);
914 bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
915                         enum bna_enet_event);
916 bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
917                         enum bna_enet_event);
918 bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
919                         enum bna_enet_event);
920
/* Entry to stopped: flush any pending pause/MTU/stop completions. */
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
	call_enet_stop_cbfn(enet);
}
928
/*
 * Stopped state: the enet is down.  Config events (pause/MTU) complete
 * immediately via their callbacks since there is no hardware to program.
 */
static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_START:
		bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
		break;

	case ENET_E_STOP:
		call_enet_stop_cbfn(enet);
		break;

	case ENET_E_FAIL:
		/* No-op */
		break;

	case ENET_E_PAUSE_CFG:
		call_enet_pause_cbfn(enet);
		break;

	case ENET_E_MTU_CFG:
		call_enet_mtu_cbfn(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		/**
		 * This event is received due to Ethport, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
965
/* Entry: push the initial pause configuration to firmware. */
static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
	bna_bfi_pause_set(enet);
}
971
/*
 * Waiting for the first pause-set response after start.  A pause
 * reconfiguration arriving meanwhile is latched in PAUSE_CHANGED and
 * re-issued when the outstanding response comes back; otherwise the
 * response transitions to started and brings up the children.
 */
static void
bna_enet_sm_pause_init_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		/* No-op */
		break;

	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
			bna_enet_chld_start(enet);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
1009
/* Entry: a stop was requested; drop any latched pause change. */
static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}
1015
/*
 * Stop requested while a pause-set is outstanding; wait for the final
 * firmware response (or a failure) before entering stopped.
 */
static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
	case ENET_E_FWRESP_PAUSE:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1030
/* Entry to started: complete any pause/MTU config callbacks. */
static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
	/**
	 * NOTE: Do not call bna_enet_chld_start() here, since it will be
	 * inadvertently called during cfg_wait->started transition as well
	 */
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
}
1041
/*
 * Started (running) state.  Pause changes go straight to firmware;
 * MTU changes require the Rx path to be stopped first, hence the
 * transition through cfg_wait.
 */
static void
bna_enet_sm_started(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_bfi_pause_set(enet);
		break;

	case ENET_E_MTU_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_enet_rx_stop(enet);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1070
/* Entry: nothing to do; the triggering event already issued the request. */
static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
}
1075
/*
 * A pause-set or Rx stop (for MTU) is outstanding.  Further config
 * requests are latched in PAUSE_CHANGED/MTU_CHANGED and replayed one
 * at a time as each completion arrives; when nothing is pending the
 * FSM returns to started.
 */
static void
bna_enet_sm_cfg_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		enet->flags |= BNA_ENET_F_MTU_CHANGED;
		break;

	case ENET_E_CHLD_STOPPED:
		/* Rx was stopped for an MTU change; bring it back up. */
		bna_enet_rx_start(enet);
		/* Fall through */
	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
			enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
			bna_enet_rx_stop(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
1121
/* Entry: stopping; discard any latched pause/MTU reconfigurations. */
static void
bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
	enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
}
1128
/*
 * Stop requested while a config operation is outstanding; once the
 * operation completes (either way), proceed to stop the children.
 */
static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_FWRESP_PAUSE:
	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1148
/* Entry: stop ethport, Tx and Rx; completion raises ENET_E_CHLD_STOPPED. */
static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
	bna_enet_chld_stop(enet);
}
1154
/* Waiting for all children to stop before entering stopped. */
static void
bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1173
1174 static void
1175 bna_bfi_pause_set(struct bna_enet *enet)
1176 {
1177         struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;
1178
1179         bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
1180                 BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
1181         pause_req->mh.num_entries = htons(
1182         bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
1183         pause_req->tx_pause = enet->pause_config.tx_pause;
1184         pause_req->rx_pause = enet->pause_config.rx_pause;
1185
1186         bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
1187                 sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
1188         bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
1189 }
1190
1191 static void
1192 bna_enet_cb_chld_stopped(void *arg)
1193 {
1194         struct bna_enet *enet = (struct bna_enet *)arg;
1195
1196         bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
1197 }
1198
1199 static void
1200 bna_enet_init(struct bna_enet *enet, struct bna *bna)
1201 {
1202         enet->bna = bna;
1203         enet->flags = 0;
1204         enet->mtu = 0;
1205         enet->type = BNA_ENET_T_REGULAR;
1206
1207         enet->stop_cbfn = NULL;
1208         enet->stop_cbarg = NULL;
1209
1210         enet->pause_cbfn = NULL;
1211
1212         enet->mtu_cbfn = NULL;
1213
1214         bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1215 }
1216
1217 static void
1218 bna_enet_uninit(struct bna_enet *enet)
1219 {
1220         enet->flags = 0;
1221
1222         enet->bna = NULL;
1223 }
1224
1225 static void
1226 bna_enet_start(struct bna_enet *enet)
1227 {
1228         enet->flags |= BNA_ENET_F_IOCETH_READY;
1229         if (enet->flags & BNA_ENET_F_ENABLED)
1230                 bfa_fsm_send_event(enet, ENET_E_START);
1231 }
1232
1233 static void
1234 bna_ioceth_cb_enet_stopped(void *arg)
1235 {
1236         struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1237
1238         bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
1239 }
1240
/*
 * Stop the enet on behalf of ioceth; arms the stop completion callback
 * before raising the event so stopped-entry can fire it synchronously.
 */
static void
bna_enet_stop(struct bna_enet *enet)
{
	enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
	enet->stop_cbarg = &enet->bna->ioceth;

	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_STOP);
}
1250
/* Fail the enet (IOC went down); no completion handshake. */
static void
bna_enet_fail(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_FAIL);
}
1257
/* Tx module stopped: release one reference on the child-stop counter. */
void
bna_enet_cb_tx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
1263
/* Rx module stopped: release one reference on the child-stop counter. */
void
bna_enet_cb_rx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
1269
/* Return the currently configured MTU (0 until bna_enet_mtu_set()). */
int
bna_enet_mtu_get(struct bna_enet *enet)
{
	return enet->mtu;
}
1275
/*
 * Administratively enable the enet.  Only meaningful while the FSM is
 * stopped; the start event fires once the IOC side is also ready.
 */
void
bna_enet_enable(struct bna_enet *enet)
{
	/* NOTE(review): cast uses bfa_sm_t while bna_ioceth_enable() uses
	 * bfa_fsm_t for the same kind of check — confirm both compare the
	 * same member. */
	if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
		return;

	enet->flags |= BNA_ENET_F_ENABLED;

	if (enet->flags & BNA_ENET_F_IOCETH_READY)
		bfa_fsm_send_event(enet, ENET_E_START);
}
1287
/*
 * Administratively disable the enet.  A soft cleanup completes
 * immediately without touching hardware; a hard cleanup arms @cbfn as
 * the stop completion and drives the FSM through its stop path.
 */
void
bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
		 void (*cbfn)(void *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(enet->bna->bnad);
		return;
	}

	enet->stop_cbfn = cbfn;
	enet->stop_cbarg = enet->bna->bnad;

	enet->flags &= ~BNA_ENET_F_ENABLED;

	bfa_fsm_send_event(enet, ENET_E_STOP);
}
1304
/*
 * Record a new pause configuration and ask the FSM to apply it;
 * @cbfn is invoked when the firmware has accepted the change.
 */
void
bna_enet_pause_config(struct bna_enet *enet,
		      struct bna_pause_config *pause_config,
		      void (*cbfn)(struct bnad *))
{
	enet->pause_config = *pause_config;

	enet->pause_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}
1316
/*
 * Record a new MTU and ask the FSM to apply it (requires an Rx
 * restart); @cbfn is invoked when the change has taken effect.
 */
void
bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		 void (*cbfn)(struct bnad *))
{
	enet->mtu = mtu;

	enet->mtu_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
}
1327
/* Fetch the adapter's factory (permanent) MAC address from the IOC. */
void
bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
{
	*mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
}
1333
1334 /* IOCETH */
1335
/*
 * Enable mailbox interrupts.  The intr_status read is not used
 * afterwards — presumably reading the status clears stale pending
 * interrupts before enabling; TODO confirm against bna_hw semantics.
 */
#define enable_mbox_intr(_ioceth)					\
do {									\
	u32 intr_status;						\
	bna_intr_status_get((_ioceth)->bna, intr_status);		\
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);			\
	bna_mbox_intr_enable((_ioceth)->bna);				\
} while (0)
1343
/* Disable mailbox interrupts (reverse order of enable_mbox_intr). */
#define disable_mbox_intr(_ioceth)					\
do {									\
	bna_mbox_intr_disable((_ioceth)->bna);				\
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);		\
} while (0)
1349
/*
 * Invoke and clear the pending ioceth stop callback, if any; pointers
 * are cleared before the call so the callback may re-arm a stop.
 */
#define call_ioceth_stop_cbfn(_ioceth)					\
do {									\
	if ((_ioceth)->stop_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		struct bnad *cbarg;					\
		cbfn = (_ioceth)->stop_cbfn;				\
		cbarg = (_ioceth)->stop_cbarg;				\
		(_ioceth)->stop_cbfn = NULL;				\
		(_ioceth)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)
1362
/* Stats module needs no teardown; kept for interface symmetry. */
#define bna_stats_mod_uninit(_stats_mod)				\
do {									\
} while (0)
1366
/* Mark the IOC ready so stats requests may be issued. */
#define bna_stats_mod_start(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = true;					\
} while (0)
1371
/* Mark the IOC down; new stats requests will be refused. */
#define bna_stats_mod_stop(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
} while (0)
1376
/* IOC failure: mark down and drop any in-flight get/clear requests. */
#define bna_stats_mod_fail(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
	(_stats_mod)->stats_get_busy = false;				\
	(_stats_mod)->stats_clr_busy = false;				\
} while (0)
1383
1384 static void bna_bfi_attr_get(struct bna_ioceth *ioceth);
1385
1386 bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
1387                         enum bna_ioceth_event);
1388 bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
1389                         enum bna_ioceth_event);
1390 bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
1391                         enum bna_ioceth_event);
1392 bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
1393                         enum bna_ioceth_event);
1394 bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
1395                         enum bna_ioceth_event);
1396 bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
1397                         enum bna_ioceth_event);
1398 bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
1399                         enum bna_ioceth_event);
1400 bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
1401                         enum bna_ioceth_event);
1402
/* Entry to stopped: flush any pending ioceth stop completion. */
static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
	call_ioceth_stop_cbfn(ioceth);
}
1408
/* Stopped state: IOC disabled; enable kicks off IOC bring-up. */
static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_ENABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		bfa_nw_ioc_enable(&ioceth->ioc);
		break;

	case IOCETH_E_DISABLE:
		/* Re-enter stopped to re-run stop-completion handling. */
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1436
/* Entry: intentionally empty — see note below. */
static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
	/**
	 * Do not call bfa_nw_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}
1445
/* Waiting for the IOC to finish its enable sequence. */
static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_IOC_READY:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1473
/* Entry: query enet attributes from firmware. */
static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
	bna_bfi_attr_get(ioceth);
}
1479
/* Waiting for the firmware's enet attribute response. */
static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1502
/* Entry to ready: start enet and stats, then notify the bnad layer. */
static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
	bna_enet_start(&ioceth->bna->enet);
	bna_stats_mod_start(&ioceth->bna->stats_mod);
	bnad_cb_ioceth_ready(ioceth->bna->bnad);
}
1510
/* Ready (operational) state. */
static void
bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1530
/* Entry: nothing to do; just waiting out the attribute response. */
static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}
1535
/*
 * Disable requested while the attribute query is outstanding; wait for
 * the response (or IOC failure) before disabling the IOC.
 */
static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1556
/* Entry: stop stats first, then the enet (which stops its children). */
static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
	bna_stats_mod_stop(&ioceth->bna->stats_mod);
	bna_enet_stop(&ioceth->bna->enet);
}
1563
/* Waiting for the enet to finish stopping before disabling the IOC. */
static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_STOPPED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1586
/* Entry: nothing to do; bfa_nw_ioc_disable() was issued by the caller. */
static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
}
1591
/* Waiting for the IOC disable completion. */
static void
bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_DISABLED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_ENET_STOPPED:
		/* This event is received due to enet failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
1611
/* Entry to failed: notify the bnad layer of the IOC failure. */
static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
	bnad_cb_ioceth_failed(ioceth->bna->bnad);
}
1617
/* Failed state: wait for either a disable request or an IOC reset. */
static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		/* IOC recovered; retry the bring-up sequence. */
		enable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		/* Already failed; ignore repeated notifications. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
1640
/*
 * Post a BFI_ENET_H2I_GET_ATTR_REQ to firmware; the response raises
 * IOCETH_E_ENET_ATTR_RESP via the message-queue handler.
 */
static void
bna_bfi_attr_get(struct bna_ioceth *ioceth)
{
	struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;

	bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
	attr_req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
	bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_attr_req), &attr_req->mh);
	bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
}
1654
1655 /* IOC callback functions */
1656
1657 static void
1658 bna_cb_ioceth_enable(void *arg, enum bfa_status error)
1659 {
1660         struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1661
1662         if (error)
1663                 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1664         else
1665                 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
1666 }
1667
1668 static void
1669 bna_cb_ioceth_disable(void *arg)
1670 {
1671         struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1672
1673         bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
1674 }
1675
1676 static void
1677 bna_cb_ioceth_hbfail(void *arg)
1678 {
1679         struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1680
1681         bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1682 }
1683
1684 static void
1685 bna_cb_ioceth_reset(void *arg)
1686 {
1687         struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1688
1689         bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
1690 }
1691
/*
 * IOC event callbacks, registered via bfa_nw_ioc_attach().
 * NOTE(review): positional initialization — order must match the
 * struct bfa_ioc_cbfn member order (enable, disable, hbfail, reset);
 * designated initializers would make this robust — confirm field
 * names in bfa_ioc.h before converting.
 */
static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
	bna_cb_ioceth_enable,
	bna_cb_ioceth_disable,
	bna_cb_ioceth_hbfail,
	bna_cb_ioceth_reset
};
1698
1699 static void bna_attr_init(struct bna_ioceth *ioceth)
1700 {
1701         ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
1702         ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
1703         ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
1704         ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
1705         ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
1706         ioceth->attr.fw_query_complete = false;
1707 }
1708
/*
 * Initialize the ioceth: attach the IOC, claim DMA/kernel memory for
 * it, then attach the common sub-modules (CEE, flash, msgq), carving
 * each one's share out of a single contiguous DMA region.  The
 * kva/dma advance order below must match each module's meminfo size —
 * do not reorder.
 */
static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;
	u8 *kva;

	ioceth->bna = bna;

	/**
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
	bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);

	/**
	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
	 * DMA memory.
	 */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
	bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();

	bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
	bfa_nw_flash_memclaim(&bna->flash, kva, dma);
	kva += bfa_nw_flash_meminfo();
	dma += bfa_nw_flash_meminfo();

	bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
	bfa_msgq_memclaim(&bna->msgq, kva, dma);
	bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
	kva += bfa_msgq_meminfo();
	dma += bfa_msgq_meminfo();

	ioceth->stop_cbfn = NULL;
	ioceth->stop_cbarg = NULL;

	bna_attr_init(ioceth);

	bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}
1764
/* Detach the IOC from the common layer and sever the back-pointer.
 * Counterpart of bna_ioceth_init(); caller owns the res_info memory.
 */
static void
bna_ioceth_uninit(struct bna_ioceth *ioceth)
{
	bfa_nw_ioc_detach(&ioceth->ioc);

	ioceth->bna = NULL;
}
1772
1773 void
1774 bna_ioceth_enable(struct bna_ioceth *ioceth)
1775 {
1776         if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
1777                 bnad_cb_ioceth_ready(ioceth->bna->bnad);
1778                 return;
1779         }
1780
1781         if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
1782                 bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
1783 }
1784
1785 void
1786 bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
1787 {
1788         if (type == BNA_SOFT_CLEANUP) {
1789                 bnad_cb_ioceth_disabled(ioceth->bna->bnad);
1790                 return;
1791         }
1792
1793         ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
1794         ioceth->stop_cbarg = ioceth->bna->bnad;
1795
1796         bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
1797 }
1798
1799 static void
1800 bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
1801                   struct bna_res_info *res_info)
1802 {
1803         int i;
1804
1805         ucam_mod->ucmac = (struct bna_mac *)
1806         res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1807
1808         INIT_LIST_HEAD(&ucam_mod->free_q);
1809         for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
1810                 bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
1811                 list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
1812         }
1813
1814         /* A separate queue to allow synchronous setting of a list of MACs */
1815         INIT_LIST_HEAD(&ucam_mod->del_q);
1816         for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) {
1817                 bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
1818                 list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
1819         }
1820
1821         ucam_mod->bna = bna;
1822 }
1823
1824 static void
1825 bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
1826 {
1827         struct list_head *qe;
1828         int i;
1829
1830         i = 0;
1831         list_for_each(qe, &ucam_mod->free_q)
1832                 i++;
1833
1834         i = 0;
1835         list_for_each(qe, &ucam_mod->del_q)
1836                 i++;
1837
1838         ucam_mod->bna = NULL;
1839 }
1840
1841 static void
1842 bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
1843                   struct bna_res_info *res_info)
1844 {
1845         int i;
1846
1847         mcam_mod->mcmac = (struct bna_mac *)
1848         res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1849
1850         INIT_LIST_HEAD(&mcam_mod->free_q);
1851         for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
1852                 bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
1853                 list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
1854         }
1855
1856         mcam_mod->mchandle = (struct bna_mcam_handle *)
1857         res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;
1858
1859         INIT_LIST_HEAD(&mcam_mod->free_handle_q);
1860         for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
1861                 bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
1862                 list_add_tail(&mcam_mod->mchandle[i].qe,
1863                                 &mcam_mod->free_handle_q);
1864         }
1865
1866         /* A separate queue to allow synchronous setting of a list of MACs */
1867         INIT_LIST_HEAD(&mcam_mod->del_q);
1868         for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) {
1869                 bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
1870                 list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
1871         }
1872
1873         mcam_mod->bna = bna;
1874 }
1875
1876 static void
1877 bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
1878 {
1879         struct list_head *qe;
1880         int i;
1881
1882         i = 0;
1883         list_for_each(qe, &mcam_mod->free_q) i++;
1884
1885         i = 0;
1886         list_for_each(qe, &mcam_mod->del_q) i++;
1887
1888         i = 0;
1889         list_for_each(qe, &mcam_mod->free_handle_q) i++;
1890
1891         mcam_mod->bna = NULL;
1892 }
1893
/* Build and post a BFI_ENET_H2I_STATS_GET_REQ to the firmware, asking
 * for all statistics of every active Tx and Rx function.  The firmware
 * DMAs the results into the host buffer set up in bna_init().  Marks
 * the stats module busy; callers gate on stats_get_busy (see
 * bna_hw_stats_get()).  Multi-byte fields are converted to network
 * byte order as required by the firmware interface.
 */
static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;

	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
		sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}
1915
1916 void
1917 bna_res_req(struct bna_res_info *res_info)
1918 {
1919         /* DMA memory for COMMON_MODULE */
1920         res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
1921         res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1922         res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
1923         res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
1924                                 (bfa_nw_cee_meminfo() +
1925                                  bfa_nw_flash_meminfo() +
1926                                  bfa_msgq_meminfo()), PAGE_SIZE);
1927
1928         /* DMA memory for retrieving IOC attributes */
1929         res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
1930         res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1931         res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
1932         res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
1933                                 ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);
1934
1935         /* Virtual memory for retreiving fw_trc */
1936         res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
1937         res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
1938         res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
1939         res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;
1940
1941         /* DMA memory for retreiving stats */
1942         res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
1943         res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1944         res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
1945         res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
1946                                 ALIGN(sizeof(struct bfi_enet_stats),
1947                                         PAGE_SIZE);
1948 }
1949
1950 void
1951 bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
1952 {
1953         struct bna_attr *attr = &bna->ioceth.attr;
1954
1955         /* Virtual memory for Tx objects - stored by Tx module */
1956         res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
1957         res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
1958                 BNA_MEM_T_KVA;
1959         res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
1960         res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
1961                 attr->num_txq * sizeof(struct bna_tx);
1962
1963         /* Virtual memory for TxQ - stored by Tx module */
1964         res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
1965         res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
1966                 BNA_MEM_T_KVA;
1967         res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
1968         res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
1969                 attr->num_txq * sizeof(struct bna_txq);
1970
1971         /* Virtual memory for Rx objects - stored by Rx module */
1972         res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
1973         res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
1974                 BNA_MEM_T_KVA;
1975         res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
1976         res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
1977                 attr->num_rxp * sizeof(struct bna_rx);
1978
1979         /* Virtual memory for RxPath - stored by Rx module */
1980         res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
1981         res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
1982                 BNA_MEM_T_KVA;
1983         res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
1984         res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
1985                 attr->num_rxp * sizeof(struct bna_rxp);
1986
1987         /* Virtual memory for RxQ - stored by Rx module */
1988         res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
1989         res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
1990                 BNA_MEM_T_KVA;
1991         res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
1992         res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
1993                 (attr->num_rxp * 2) * sizeof(struct bna_rxq);
1994
1995         /* Virtual memory for Unicast MAC address - stored by ucam module */
1996         res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
1997         res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
1998                 BNA_MEM_T_KVA;
1999         res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
2000         res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
2001                 (attr->num_ucmac * 2) * sizeof(struct bna_mac);
2002
2003         /* Virtual memory for Multicast MAC address - stored by mcam module */
2004         res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
2005         res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
2006                 BNA_MEM_T_KVA;
2007         res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
2008         res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
2009                 (attr->num_mcmac * 2) * sizeof(struct bna_mac);
2010
2011         /* Virtual memory for Multicast handle - stored by mcam module */
2012         res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
2013         res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
2014                 BNA_MEM_T_KVA;
2015         res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
2016         res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
2017                 attr->num_mcmac * sizeof(struct bna_mcam_handle);
2018 }
2019
/* First-stage BNA initialization: record driver/PCI context, wire up
 * the hardware-stats DMA buffer allocated per bna_res_req(), set up
 * register addresses, then bring up the ioceth (which also initializes
 * cee, flash and msgq), the enet and the ethport objects.  The
 * attribute-dependent modules are initialized later by bna_mod_init().
 */
void
bna_init(struct bna *bna, struct bnad *bnad,
		struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Also initializes diag, cee, sfp, phy_port, msgq */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}
2042
/* Second-stage BNA initialization: bring up the Tx/Rx and CAM modules
 * whose memory was sized from the IOC attributes (see
 * bna_mod_res_req()), reset the promiscuous/default-mode RID trackers
 * and flag completion so bna_uninit() knows these modules need
 * tearing down.
 */
void
bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
{
	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	bna->default_mode_rid = BFI_INVALID_RID;
	bna->promisc_rid = BFI_INVALID_RID;

	bna->mod_flags |= BNA_MOD_F_INIT_DONE;
}
2059
/* Tear down the BNA in reverse order of initialization.  The
 * attribute-dependent modules are only uninitialized if bna_mod_init()
 * completed (BNA_MOD_F_INIT_DONE); the base objects from bna_init()
 * are always torn down.
 */
void
bna_uninit(struct bna *bna)
{
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);

	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}
2079
2080 int
2081 bna_num_txq_set(struct bna *bna, int num_txq)
2082 {
2083         if (bna->ioceth.attr.fw_query_complete &&
2084                 (num_txq <= bna->ioceth.attr.num_txq)) {
2085                 bna->ioceth.attr.num_txq = num_txq;
2086                 return BNA_CB_SUCCESS;
2087         }
2088
2089         return BNA_CB_FAIL;
2090 }
2091
2092 int
2093 bna_num_rxp_set(struct bna *bna, int num_rxp)
2094 {
2095         if (bna->ioceth.attr.fw_query_complete &&
2096                 (num_rxp <= bna->ioceth.attr.num_rxp)) {
2097                 bna->ioceth.attr.num_rxp = num_rxp;
2098                 return BNA_CB_SUCCESS;
2099         }
2100
2101         return BNA_CB_FAIL;
2102 }
2103
/* Pop the first MAC entry off @head (a ucam/mcam free_q or del_q).
 * Returns NULL if the list is empty.  The cast relies on qe being the
 * queue-element member of struct bna_mac.
 */
struct bna_mac *
bna_cam_mod_mac_get(struct list_head *head)
{
	struct list_head *qe;

	if (list_empty(head))
		return NULL;

	bfa_q_deq(head, &qe);
	return (struct bna_mac *)qe;
}
2115
/* Return @mac to the list @tail; counterpart of bna_cam_mod_mac_get(). */
void
bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, tail);
}
2121
/* Allocate a multicast handle from the module's free list, or NULL if
 * none are available.  The cast relies on qe being the queue-element
 * member of struct bna_mcam_handle.
 */
struct bna_mcam_handle *
bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;

	if (list_empty(&mcam_mod->free_handle_q))
		return NULL;

	bfa_q_deq(&mcam_mod->free_handle_q, &qe);

	return (struct bna_mcam_handle *)qe;
}
2134
/* Return @handle to the free list; counterpart of
 * bna_mcam_mod_handle_get().
 */
void
bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			struct bna_mcam_handle *handle)
{
	list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
}
2141
2142 void
2143 bna_hw_stats_get(struct bna *bna)
2144 {
2145         if (!bna->stats_mod.ioc_ready) {
2146                 bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
2147                 return;
2148         }
2149         if (bna->stats_mod.stats_get_busy) {
2150                 bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
2151                 return;
2152         }
2153
2154         bna_bfi_stats_get(bna);
2155 }