/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 */
#include "bna.h"
#include "bfi.h"

/* IB */
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
    ib->coalescing_timeo = coalescing_timeo;
    ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
                (u32)ib->coalescing_timeo, 0);
}
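/*
 * Note (inferred from the code above): the cached ack word written to the
 * IB doorbell encodes the current coalescing timeout, so every subsequent
 * interrupt acknowledgment re-arms the interrupt block with this value
 * without recomputing it in the hot path.
 */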
/* RXF */

#define bna_rxf_vlan_cfg_soft_reset(rxf)                                \
do {                                                                    \
    (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;               \
    (rxf)->vlan_strip_pending = true;                                   \
} while (0)

#define bna_rxf_rss_cfg_soft_reset(rxf)                                 \
do {                                                                    \
    if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)                      \
        (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |                   \
                BNA_RSS_F_CFG_PENDING |                                 \
                BNA_RSS_F_STATUS_PENDING);                              \
} while (0)
static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
                    enum bna_cleanup_type cleanup);
static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
                    enum bna_cleanup_type cleanup);
static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
                    enum bna_cleanup_type cleanup);
bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
            enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
            enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
            enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
            enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
            enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
            enum bna_rxf_event);
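/*
 * RxF state machine overview (derived from the handlers below):
 *
 *   stopped --RXF_E_START--> cfg_wait --(no pending config)--> started
 *   started --RXF_E_CONFIG--> cfg_wait (one firmware command per pass)
 *   started/cfg_wait --RXF_E_PAUSE--> fltr_clr_wait --> paused
 *   cfg_wait --RXF_E_STOP--> last_resp_wait --> stopped
 *
 * RXF_E_FAIL drops back to stopped from any state after a soft reset of
 * the pending configuration.
 */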
static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
    call_rxf_stop_cbfn(rxf);
}

static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
    switch (event) {
    case RXF_E_START:
        if (rxf->flags & BNA_RXF_F_PAUSED) {
            bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
            call_rxf_start_cbfn(rxf);
        } else
            bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
        break;

    case RXF_E_STOP:
        call_rxf_stop_cbfn(rxf);
        break;

    case RXF_E_FAIL:
        /* No-op */
        break;

    case RXF_E_CONFIG:
        call_rxf_cam_fltr_cbfn(rxf);
        break;

    case RXF_E_PAUSE:
        rxf->flags |= BNA_RXF_F_PAUSED;
        call_rxf_pause_cbfn(rxf);
        break;

    case RXF_E_RESUME:
        rxf->flags &= ~BNA_RXF_F_PAUSED;
        call_rxf_resume_cbfn(rxf);
        break;

    default:
        bfa_sm_fault(event);
    }
}
static void
bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
{
    call_rxf_pause_cbfn(rxf);
}

static void
bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
{
    switch (event) {
    case RXF_E_STOP:
    case RXF_E_FAIL:
        bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
        break;

    case RXF_E_CONFIG:
        call_rxf_cam_fltr_cbfn(rxf);
        break;

    case RXF_E_RESUME:
        rxf->flags &= ~BNA_RXF_F_PAUSED;
        bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
        break;

    default:
        bfa_sm_fault(event);
    }
}
static void
bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
{
    if (!bna_rxf_cfg_apply(rxf)) {
        /* No more pending config updates */
        bfa_fsm_set_state(rxf, bna_rxf_sm_started);
    }
}

static void
bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
    switch (event) {
    case RXF_E_STOP:
        bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
        break;

    case RXF_E_FAIL:
        bna_rxf_cfg_reset(rxf);
        call_rxf_start_cbfn(rxf);
        call_rxf_cam_fltr_cbfn(rxf);
        call_rxf_resume_cbfn(rxf);
        bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
        break;

    case RXF_E_CONFIG:
        /* No-op */
        break;

    case RXF_E_PAUSE:
        rxf->flags |= BNA_RXF_F_PAUSED;
        call_rxf_start_cbfn(rxf);
        bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
        break;

    case RXF_E_FW_RESP:
        if (!bna_rxf_cfg_apply(rxf)) {
            /* No more pending config updates */
            bfa_fsm_set_state(rxf, bna_rxf_sm_started);
        }
        break;

    default:
        bfa_sm_fault(event);
    }
}
static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
    call_rxf_start_cbfn(rxf);
    call_rxf_cam_fltr_cbfn(rxf);
    call_rxf_resume_cbfn(rxf);
}

static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
    switch (event) {
    case RXF_E_STOP:
    case RXF_E_FAIL:
        bna_rxf_cfg_reset(rxf);
        bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
        break;

    case RXF_E_CONFIG:
        bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
        break;

    case RXF_E_PAUSE:
        rxf->flags |= BNA_RXF_F_PAUSED;
        if (!bna_rxf_fltr_clear(rxf))
            bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
        else
            bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
        break;

    default:
        bfa_sm_fault(event);
    }
}
static void
bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
    switch (event) {
    case RXF_E_FAIL:
        bna_rxf_cfg_reset(rxf);
        call_rxf_pause_cbfn(rxf);
        bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
        break;

    case RXF_E_FW_RESP:
        if (!bna_rxf_fltr_clear(rxf)) {
            /* No more pending CAM entries to clear */
            bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
        }
        break;

    default:
        bfa_sm_fault(event);
    }
}
static void
bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
    switch (event) {
    case RXF_E_FAIL:
    case RXF_E_FW_RESP:
        bna_rxf_cfg_reset(rxf);
        bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
        break;

    default:
        bfa_sm_fault(event);
    }
}
static void
bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
        enum bfi_enet_h2i_msgs req_type)
{
    struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;

    bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
    req->mh.num_entries = htons(
        bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
    memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
    bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
        sizeof(struct bfi_enet_ucast_req), &req->mh);
    bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
{
    struct bfi_enet_mcast_add_req *req =
        &rxf->bfi_enet_cmd.mcast_add_req;

    bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
        0, rxf->rx->rid);
    req->mh.num_entries = htons(
        bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
    memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
    bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
        sizeof(struct bfi_enet_mcast_add_req), &req->mh);
    bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
{
    struct bfi_enet_mcast_del_req *req =
        &rxf->bfi_enet_cmd.mcast_del_req;

    bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
        0, rxf->rx->rid);
    req->mh.num_entries = htons(
        bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
    req->handle = htons(handle);
    bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
        sizeof(struct bfi_enet_mcast_del_req), &req->mh);
    bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
{
    struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

    bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
        BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
    req->mh.num_entries = htons(
        bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
    req->enable = status;
    bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
        sizeof(struct bfi_enet_enable_req), &req->mh);
    bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
{
    struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

    bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
        BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
    req->mh.num_entries = htons(
        bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
    req->enable = status;
    bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
        sizeof(struct bfi_enet_enable_req), &req->mh);
    bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
{
    struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
    int i;
    int j;

    bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
        BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
    req->mh.num_entries = htons(
        bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
    req->block_idx = block_idx;
    for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
        j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
        if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
            req->bit_mask[i] =
                htonl(rxf->vlan_filter_table[j]);
        else
            req->bit_mask[i] = 0xFFFFFFFF;
    }
    bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
        sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
    bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
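/*
 * The 4096-bit VLAN filter table is pushed to firmware one block at a
 * time: each request carries BFI_ENET_VLAN_BLOCK_SIZE filter bits starting
 * at block_idx. When VLAN filtering is disabled, all-ones masks are sent
 * instead so that every VLAN id is accepted.
 */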
static void
bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
{
    struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

    bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
        BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
    req->mh.num_entries = htons(
        bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
    req->enable = rxf->vlan_strip_status;
    bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
        sizeof(struct bfi_enet_enable_req), &req->mh);
    bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rit_cfg(struct bna_rxf *rxf)
{
    struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;

    bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
        BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
    req->mh.num_entries = htons(
        bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
    req->size = htons(rxf->rit_size);
    memcpy(&req->table[0], rxf->rit, rxf->rit_size);
    bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
        sizeof(struct bfi_enet_rit_req), &req->mh);
    bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rss_cfg(struct bna_rxf *rxf)
{
    struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
    int i;

    bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
        BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
    req->mh.num_entries = htons(
        bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
    req->cfg.type = rxf->rss_cfg.hash_type;
    req->cfg.mask = rxf->rss_cfg.hash_mask;
    for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
        req->cfg.key[i] =
            htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
    bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
        sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
    bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rss_enable(struct bna_rxf *rxf)
{
    struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

    bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
        BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
    req->mh.num_entries = htons(
        bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
    req->enable = rxf->rss_status;
    bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
        sizeof(struct bfi_enet_enable_req), &req->mh);
    bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
/* This function gets the multicast MAC that has already been added to CAM */
static struct bna_mac *
bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
{
    struct bna_mac *mac;
    struct list_head *qe;

    list_for_each(qe, &rxf->mcast_active_q) {
        mac = (struct bna_mac *)qe;
        if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
            return mac;
    }

    list_for_each(qe, &rxf->mcast_pending_del_q) {
        mac = (struct bna_mac *)qe;
        if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
            return mac;
    }

    return NULL;
}
static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
{
    struct bna_mcam_handle *mchandle;
    struct list_head *qe;

    list_for_each(qe, &rxf->mcast_handle_q) {
        mchandle = (struct bna_mcam_handle *)qe;
        if (mchandle->handle == handle)
            return mchandle;
    }

    return NULL;
}
static void
bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
{
    struct bna_mac *mcmac;
    struct bna_mcam_handle *mchandle;

    mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
    mchandle = bna_rxf_mchandle_get(rxf, handle);
    if (mchandle == NULL) {
        mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
        mchandle->handle = handle;
        mchandle->refcnt = 0;
        list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
    }
    mchandle->refcnt++;
    mcmac->handle = mchandle;
}
static int
bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
        enum bna_cleanup_type cleanup)
{
    struct bna_mcam_handle *mchandle;
    int ret = 0;

    mchandle = mac->handle;
    if (mchandle == NULL)
        return ret;

    mchandle->refcnt--;
    if (mchandle->refcnt == 0) {
        if (cleanup == BNA_HARD_CLEANUP) {
            bna_bfi_mcast_del_req(rxf, mchandle->handle);
            ret = 1;
        }
        list_del(&mchandle->qe);
        bfa_q_qe_init(&mchandle->qe);
        bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
    }
    mac->handle = NULL;

    return ret;
}
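/*
 * Firmware CAM handles are reference counted: several bna_mac entries may
 * map to the same bna_mcam_handle, and the delete request is only issued
 * (for hard cleanup) once the last reference on the handle is dropped.
 */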
static int
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
{
    struct bna_mac *mac = NULL;
    struct list_head *qe;
    int ret;

    /* Delete multicast entries previously added */
    while (!list_empty(&rxf->mcast_pending_del_q)) {
        bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
        bfa_q_qe_init(qe);
        mac = (struct bna_mac *)qe;
        ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
        bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
        if (ret)
            return ret;
    }

    /* Add multicast entries */
    if (!list_empty(&rxf->mcast_pending_add_q)) {
        bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
        bfa_q_qe_init(qe);
        mac = (struct bna_mac *)qe;
        list_add_tail(&mac->qe, &rxf->mcast_active_q);
        bna_bfi_mcast_add_req(rxf, mac);
        return 1;
    }

    return 0;
}
static int
bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
{
    u8 vlan_pending_bitmask;
    int block_idx = 0;

    if (rxf->vlan_pending_bitmask) {
        vlan_pending_bitmask = rxf->vlan_pending_bitmask;
        while (!(vlan_pending_bitmask & 0x1)) {
            block_idx++;
            vlan_pending_bitmask >>= 1;
        }
        rxf->vlan_pending_bitmask &= ~(1 << block_idx);
        bna_bfi_rx_vlan_filter_set(rxf, block_idx);
        return 1;
    }

    return 0;
}
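/*
 * vlan_pending_bitmask keeps one bit per VLAN table block. The scan above
 * finds the lowest set bit, clears it, and posts that block to firmware,
 * so exactly one block is flushed per firmware round-trip.
 */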
static int
bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
    struct list_head *qe;
    struct bna_mac *mac;
    int ret;

    /* Throw away delete pending mcast entries */
    while (!list_empty(&rxf->mcast_pending_del_q)) {
        bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
        bfa_q_qe_init(qe);
        mac = (struct bna_mac *)qe;
        ret = bna_rxf_mcast_del(rxf, mac, cleanup);
        bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
        if (ret)
            return ret;
    }

    /* Move active mcast entries to pending_add_q */
    while (!list_empty(&rxf->mcast_active_q)) {
        bfa_q_deq(&rxf->mcast_active_q, &qe);
        bfa_q_qe_init(qe);
        list_add_tail(qe, &rxf->mcast_pending_add_q);
        mac = (struct bna_mac *)qe;
        if (bna_rxf_mcast_del(rxf, mac, cleanup))
            return 1;
    }

    return 0;
}
static int
bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
{
    if (rxf->rss_pending) {
        if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
            rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
            bna_bfi_rit_cfg(rxf);
            return 1;
        }

        if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
            rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
            bna_bfi_rss_cfg(rxf);
            return 1;
        }

        if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
            rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
            bna_bfi_rss_enable(rxf);
            return 1;
        }
    }

    return 0;
}
static int
bna_rxf_cfg_apply(struct bna_rxf *rxf)
{
    if (bna_rxf_ucast_cfg_apply(rxf))
        return 1;

    if (bna_rxf_mcast_cfg_apply(rxf))
        return 1;

    if (bna_rxf_promisc_cfg_apply(rxf))
        return 1;

    if (bna_rxf_allmulti_cfg_apply(rxf))
        return 1;

    if (bna_rxf_vlan_cfg_apply(rxf))
        return 1;

    if (bna_rxf_vlan_strip_cfg_apply(rxf))
        return 1;

    if (bna_rxf_rss_cfg_apply(rxf))
        return 1;

    return 0;
}
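/*
 * Each *_cfg_apply() helper returns 1 after posting a command to firmware
 * (the caller must then wait for RXF_E_FW_RESP before applying more) and 0
 * when it had nothing pending, which lets bna_rxf_cfg_apply() drain all
 * outstanding configuration one mailbox command at a time.
 */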
/* Only software reset */
static int
bna_rxf_fltr_clear(struct bna_rxf *rxf)
{
    if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
        return 1;

    if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
        return 1;

    if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
        return 1;

    if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
        return 1;

    return 0;
}
static void
bna_rxf_cfg_reset(struct bna_rxf *rxf)
{
    bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
    bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
    bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
    bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
    bna_rxf_vlan_cfg_soft_reset(rxf);
    bna_rxf_rss_cfg_soft_reset(rxf);
}
static void
bna_rit_init(struct bna_rxf *rxf, int rit_size)
{
    struct bna_rx *rx = rxf->rx;
    struct bna_rxp *rxp;
    struct list_head *qe;
    int offset = 0;

    rxf->rit_size = rit_size;
    list_for_each(qe, &rx->rxp_q) {
        rxp = (struct bna_rxp *)qe;
        rxf->rit[offset] = rxp->cq.ccb->id;
        offset++;
    }
}
void
bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
{
    bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

void
bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
        struct bfi_msgq_mhdr *msghdr)
{
    struct bfi_enet_mcast_add_req *req =
        &rxf->bfi_enet_cmd.mcast_add_req;
    struct bfi_enet_mcast_add_rsp *rsp =
        (struct bfi_enet_mcast_add_rsp *)msghdr;

    bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
        ntohs(rsp->handle));
    bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}
static void
bna_rxf_init(struct bna_rxf *rxf,
        struct bna_rx *rx,
        struct bna_rx_config *q_config,
        struct bna_res_info *res_info)
{
    rxf->rx = rx;

    INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
    INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
    rxf->ucast_pending_set = 0;
    rxf->ucast_active_set = 0;
    INIT_LIST_HEAD(&rxf->ucast_active_q);
    rxf->ucast_pending_mac = NULL;

    INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
    INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
    INIT_LIST_HEAD(&rxf->mcast_active_q);
    INIT_LIST_HEAD(&rxf->mcast_handle_q);

    if (q_config->paused)
        rxf->flags |= BNA_RXF_F_PAUSED;

    rxf->rit = (u8 *)
        res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
    bna_rit_init(rxf, q_config->num_paths);

    rxf->rss_status = q_config->rss_status;
    if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
        rxf->rss_cfg = q_config->rss_config;
        rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
        rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
        rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
    }

    rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
    memset(rxf->vlan_filter_table, 0,
            (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
    rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
    rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;

    rxf->vlan_strip_status = q_config->vlan_strip_status;

    bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}
static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
    struct bna_mac *mac;

    rxf->ucast_pending_set = 0;
    rxf->ucast_active_set = 0;

    while (!list_empty(&rxf->ucast_pending_add_q)) {
        bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
        bfa_q_qe_init(&mac->qe);
        bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
    }

    if (rxf->ucast_pending_mac) {
        bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
        bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
            rxf->ucast_pending_mac);
        rxf->ucast_pending_mac = NULL;
    }

    while (!list_empty(&rxf->mcast_pending_add_q)) {
        bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
        bfa_q_qe_init(&mac->qe);
        bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
    }

    rxf->rxmode_pending = 0;
    rxf->rxmode_pending_bitmask = 0;
    if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
        rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
    if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
        rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;

    rxf->rss_pending = 0;
    rxf->vlan_strip_pending = false;

    rxf->flags = 0;

    rxf->rx = NULL;
}
static void
bna_rx_cb_rxf_started(struct bna_rx *rx)
{
    bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
}

static void
bna_rxf_start(struct bna_rxf *rxf)
{
    rxf->start_cbfn = bna_rx_cb_rxf_started;
    rxf->start_cbarg = rxf->rx;
    bfa_fsm_send_event(rxf, RXF_E_START);
}

static void
bna_rx_cb_rxf_stopped(struct bna_rx *rx)
{
    bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
}

static void
bna_rxf_stop(struct bna_rxf *rxf)
{
    rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
    rxf->stop_cbarg = rxf->rx;
    bfa_fsm_send_event(rxf, RXF_E_STOP);
}

static void
bna_rxf_fail(struct bna_rxf *rxf)
{
    bfa_fsm_send_event(rxf, RXF_E_FAIL);
}
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
         void (*cbfn)(struct bnad *, struct bna_rx *))
{
    struct bna_rxf *rxf = &rx->rxf;

    if (rxf->ucast_pending_mac == NULL) {
        rxf->ucast_pending_mac =
            bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
        if (rxf->ucast_pending_mac == NULL)
            return BNA_CB_UCAST_CAM_FULL;
        bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
    }

    memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
    rxf->ucast_pending_set = 1;
    rxf->cam_fltr_cbfn = cbfn;
    rxf->cam_fltr_cbarg = rx->bna->bnad;

    bfa_fsm_send_event(rxf, RXF_E_CONFIG);

    return BNA_CB_SUCCESS;
}
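/*
 * Typical usage (a sketch, not taken from this file): the bnad layer calls
 * this from its MAC-address configuration path and treats the operation as
 * asynchronous, completing only when cam_fltr_cbfn fires, e.g.:
 *
 *	err = bna_rx_ucast_set(rx, sa->sa_data, bnad_cb_ucast_set);
 *	if (err != BNA_CB_SUCCESS)
 *		return -EBUSY;
 *
 * bnad_cb_ucast_set is a hypothetical completion handler name used only
 * for illustration.
 */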
enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
         void (*cbfn)(struct bnad *, struct bna_rx *))
{
    struct bna_rxf *rxf = &rx->rxf;
    struct bna_mac *mac;

    /* Check if already added or pending addition */
    if (bna_mac_find(&rxf->mcast_active_q, addr) ||
        bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
        if (cbfn)
            cbfn(rx->bna->bnad, rx);
        return BNA_CB_SUCCESS;
    }

    mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
    if (mac == NULL)
        return BNA_CB_MCAST_LIST_FULL;
    bfa_q_qe_init(&mac->qe);
    memcpy(mac->addr, addr, ETH_ALEN);
    list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

    rxf->cam_fltr_cbfn = cbfn;
    rxf->cam_fltr_cbarg = rx->bna->bnad;

    bfa_fsm_send_event(rxf, RXF_E_CONFIG);

    return BNA_CB_SUCCESS;
}
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
             void (*cbfn)(struct bnad *, struct bna_rx *))
{
    struct bna_rxf *rxf = &rx->rxf;
    struct list_head list_head;
    struct list_head *qe;
    u8 *mcaddr;
    struct bna_mac *mac;
    int i;

    /* Allocate nodes */
    INIT_LIST_HEAD(&list_head);
    for (i = 0, mcaddr = mclist; i < count; i++) {
        mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
        if (mac == NULL)
            goto err_return;
        bfa_q_qe_init(&mac->qe);
        memcpy(mac->addr, mcaddr, ETH_ALEN);
        list_add_tail(&mac->qe, &list_head);

        mcaddr += ETH_ALEN;
    }

    /* Purge the pending_add_q */
    while (!list_empty(&rxf->mcast_pending_add_q)) {
        bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
        bfa_q_qe_init(qe);
        mac = (struct bna_mac *)qe;
        bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
    }

    /* Schedule active_q entries for deletion */
    while (!list_empty(&rxf->mcast_active_q)) {
        bfa_q_deq(&rxf->mcast_active_q, &qe);
        mac = (struct bna_mac *)qe;
        bfa_q_qe_init(&mac->qe);
        list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
    }

    /* Add the new entries */
    while (!list_empty(&list_head)) {
        bfa_q_deq(&list_head, &qe);
        mac = (struct bna_mac *)qe;
        bfa_q_qe_init(&mac->qe);
        list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
    }

    rxf->cam_fltr_cbfn = cbfn;
    rxf->cam_fltr_cbarg = rx->bna->bnad;
    bfa_fsm_send_event(rxf, RXF_E_CONFIG);

    return BNA_CB_SUCCESS;

err_return:
    while (!list_empty(&list_head)) {
        bfa_q_deq(&list_head, &qe);
        mac = (struct bna_mac *)qe;
        bfa_q_qe_init(&mac->qe);
        bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
    }

    return BNA_CB_MCAST_LIST_FULL;
}
void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
    struct bna_rxf *rxf = &rx->rxf;
    int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
    int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
    int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

    rxf->vlan_filter_table[index] |= bit;
    if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
        rxf->vlan_pending_bitmask |= (1 << group_id);
        bfa_fsm_send_event(rxf, RXF_E_CONFIG);
    }
}

void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
    struct bna_rxf *rxf = &rx->rxf;
    int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
    int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
    int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

    rxf->vlan_filter_table[index] &= ~bit;
    if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
        rxf->vlan_pending_bitmask |= (1 << group_id);
        bfa_fsm_send_event(rxf, RXF_E_CONFIG);
    }
}
static int
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
{
    struct bna_mac *mac = NULL;
    struct list_head *qe;

    /* Delete MAC addresses previously added */
    if (!list_empty(&rxf->ucast_pending_del_q)) {
        bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
        bfa_q_qe_init(qe);
        mac = (struct bna_mac *)qe;
        bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
        bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
        return 1;
    }

    /* Set default unicast MAC */
    if (rxf->ucast_pending_set) {
        rxf->ucast_pending_set = 0;
        memcpy(rxf->ucast_active_mac.addr,
            rxf->ucast_pending_mac->addr, ETH_ALEN);
        rxf->ucast_active_set = 1;
        bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
            BFI_ENET_H2I_MAC_UCAST_SET_REQ);
        return 1;
    }

    /* Add additional MAC entries */
    if (!list_empty(&rxf->ucast_pending_add_q)) {
        bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
        bfa_q_qe_init(qe);
        mac = (struct bna_mac *)qe;
        list_add_tail(&mac->qe, &rxf->ucast_active_q);
        bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
        return 1;
    }

    return 0;
}
static int
bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
    struct list_head *qe;
    struct bna_mac *mac;

    /* Throw away delete pending ucast entries */
    while (!list_empty(&rxf->ucast_pending_del_q)) {
        bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
        bfa_q_qe_init(qe);
        mac = (struct bna_mac *)qe;
        if (cleanup == BNA_SOFT_CLEANUP)
            bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
        else {
            bna_bfi_ucast_req(rxf, mac,
                BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
            bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
            return 1;
        }
    }

    /* Move active ucast entries to pending_add_q */
    while (!list_empty(&rxf->ucast_active_q)) {
        bfa_q_deq(&rxf->ucast_active_q, &qe);
        bfa_q_qe_init(qe);
        list_add_tail(qe, &rxf->ucast_pending_add_q);
        if (cleanup == BNA_HARD_CLEANUP) {
            mac = (struct bna_mac *)qe;
            bna_bfi_ucast_req(rxf, mac,
                BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
            return 1;
        }
    }

    if (rxf->ucast_active_set) {
        rxf->ucast_pending_set = 1;
        rxf->ucast_active_set = 0;
        if (cleanup == BNA_HARD_CLEANUP) {
            bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
                BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
            return 1;
        }
    }

    return 0;
}
static int
bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
{
    struct bna *bna = rxf->rx->bna;

    /* Enable/disable promiscuous mode */
    if (is_promisc_enable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask)) {
        /* move promisc configuration from pending -> active */
        promisc_inactive(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
        rxf->rxmode_active |= BNA_RXMODE_PROMISC;
        bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
        return 1;
    } else if (is_promisc_disable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask)) {
        /* move promisc configuration from pending -> active */
        promisc_inactive(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
        rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
        bna->promisc_rid = BFI_INVALID_RID;
        bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
        return 1;
    }

    return 0;
}
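/*
 * Promiscuous (and, below, allmulti) mode uses a two-stage scheme: enable
 * and disable requests are first parked in rxmode_pending /
 * rxmode_pending_bitmask, then moved into rxmode_active once the
 * corresponding firmware request has been posted.
 */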
static int
bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
    struct bna *bna = rxf->rx->bna;

    /* Clear pending promisc mode disable */
    if (is_promisc_disable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask)) {
        promisc_inactive(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
        rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
        bna->promisc_rid = BFI_INVALID_RID;
        if (cleanup == BNA_HARD_CLEANUP) {
            bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
            return 1;
        }
    }

    /* Move promisc mode config from active -> pending */
    if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
        promisc_enable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
        rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
        if (cleanup == BNA_HARD_CLEANUP) {
            bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
            return 1;
        }
    }

    return 0;
}
static int
bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
{
    /* Enable/disable allmulti mode */
    if (is_allmulti_enable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask)) {
        /* move allmulti configuration from pending -> active */
        allmulti_inactive(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
        rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
        bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
        return 1;
    } else if (is_allmulti_disable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask)) {
        /* move allmulti configuration from pending -> active */
        allmulti_inactive(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
        rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
        bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
        return 1;
    }

    return 0;
}
static int
bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
    /* Clear pending allmulti mode disable */
    if (is_allmulti_disable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask)) {
        allmulti_inactive(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
        rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
        if (cleanup == BNA_HARD_CLEANUP) {
            bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
            return 1;
        }
    }

    /* Move allmulti mode config from active -> pending */
    if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
        allmulti_enable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
        rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
        if (cleanup == BNA_HARD_CLEANUP) {
            bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
            return 1;
        }
    }

    return 0;
}
static int
bna_rxf_promisc_enable(struct bna_rxf *rxf)
{
    struct bna *bna = rxf->rx->bna;
    int ret = 0;

    if (is_promisc_enable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask) ||
        (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
        /* Do nothing if pending enable or already enabled */
    } else if (is_promisc_disable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask)) {
        /* Turn off pending disable command */
        promisc_inactive(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
    } else {
        /* Schedule enable */
        promisc_enable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
        bna->promisc_rid = rxf->rx->rid;
        ret = 1;
    }

    return ret;
}
static int
bna_rxf_promisc_disable(struct bna_rxf *rxf)
{
    struct bna *bna = rxf->rx->bna;
    int ret = 0;

    if (is_promisc_disable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask) ||
        (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
        /* Do nothing if pending disable or already disabled */
    } else if (is_promisc_enable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask)) {
        /* Turn off pending enable command */
        promisc_inactive(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
        bna->promisc_rid = BFI_INVALID_RID;
    } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
        /* Schedule disable */
        promisc_disable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
        ret = 1;
    }

    return ret;
}
static int
bna_rxf_allmulti_enable(struct bna_rxf *rxf)
{
    int ret = 0;

    if (is_allmulti_enable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask) ||
        (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
        /* Do nothing if pending enable or already enabled */
    } else if (is_allmulti_disable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask)) {
        /* Turn off pending disable command */
        allmulti_inactive(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
    } else {
        /* Schedule enable */
        allmulti_enable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
        ret = 1;
    }

    return ret;
}
static int
bna_rxf_allmulti_disable(struct bna_rxf *rxf)
{
    int ret = 0;

    if (is_allmulti_disable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask) ||
        (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
        /* Do nothing if pending disable or already disabled */
    } else if (is_allmulti_enable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask)) {
        /* Turn off pending enable command */
        allmulti_inactive(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
    } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
        /* Schedule disable */
        allmulti_disable(rxf->rxmode_pending,
                rxf->rxmode_pending_bitmask);
        ret = 1;
    }

    return ret;
}
static int
bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
{
    if (rxf->vlan_strip_pending) {
        rxf->vlan_strip_pending = false;
        bna_bfi_vlan_strip_enable(rxf);
        return 1;
    }

    return 0;
}
/* RX */

#define BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ?	\
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

#define SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) &\
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
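/* SIZE_TO_PAGES() rounds a byte count up to a whole number of pages. */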
#define call_rx_stop_cbfn(rx)                                           \
do {                                                                    \
    if ((rx)->stop_cbfn) {                                              \
        void (*cbfn)(void *, struct bna_rx *);                          \
        void *cbarg;                                                    \
        cbfn = (rx)->stop_cbfn;                                         \
        cbarg = (rx)->stop_cbarg;                                       \
        (rx)->stop_cbfn = NULL;                                         \
        (rx)->stop_cbarg = NULL;                                        \
        cbfn(cbarg, rx);                                                \
    }                                                                   \
} while (0)

#define call_rx_stall_cbfn(rx)                                          \
do {                                                                    \
    if ((rx)->rx_stall_cbfn)                                            \
        (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));                     \
} while (0)
#define bfi_enet_datapath_q_init(bfi_q, bna_qpt)                        \
do {                                                                    \
    struct bna_dma_addr cur_q_addr =                                    \
        *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));              \
    (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;            \
    (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;            \
    (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;                  \
    (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;                  \
    (bfi_q)->pages = htons((u16)(bna_qpt)->page_count);                 \
    (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);                \
} while (0)
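/*
 * A queue page table (QPT) describes a queue as a list of DMA-able pages;
 * the macro above hands firmware the page table address, the address of
 * the first page, and the page geometry for one such queue.
 */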
static void bna_bfi_rx_enet_start(struct bna_rx *rx);
static void bna_rx_enet_stop(struct bna_rx *rx);
static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
bfa_fsm_state_decl(bna_rx, stopped,
    struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_wait,
    struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_stop_wait,
    struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
    struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
    struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
    struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, stop_wait,
    struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, cleanup_wait,
    struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, failed,
    struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, quiesce_wait,
    struct bna_rx, enum bna_rx_event);
static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
{
    call_rx_stop_cbfn(rx);
}

static void bna_rx_sm_stopped(struct bna_rx *rx,
                enum bna_rx_event event)
{
    switch (event) {
    case RX_E_START:
        bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
        break;

    case RX_E_STOP:
        call_rx_stop_cbfn(rx);
        break;

    case RX_E_FAIL:
        /* no-op */
        break;

    default:
        bfa_sm_fault(event);
        break;
    }
}
static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
{
    bna_bfi_rx_enet_start(rx);
}

static void
bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
    switch (event) {
    case RX_E_FAIL:
    case RX_E_STOPPED:
        bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
        rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
        break;

    case RX_E_STARTED:
        bna_rx_enet_stop(rx);
        break;

    default:
        bfa_sm_fault(event);
        break;
    }
}
static void bna_rx_sm_start_wait(struct bna_rx *rx,
                enum bna_rx_event event)
{
    switch (event) {
    case RX_E_STOP:
        bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
        break;

    case RX_E_FAIL:
        bfa_fsm_set_state(rx, bna_rx_sm_stopped);
        break;

    case RX_E_STARTED:
        bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
        break;

    default:
        bfa_sm_fault(event);
        break;
    }
}
static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
{
    rx->rx_post_cbfn(rx->bna->bnad, rx);
    bna_rxf_start(&rx->rxf);
}

static void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
    switch (event) {
    case RX_E_FAIL:
        bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
        bna_rxf_fail(&rx->rxf);
        call_rx_stall_cbfn(rx);
        rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
        break;

    case RX_E_RXF_STARTED:
        bna_rxf_stop(&rx->rxf);
        break;

    case RX_E_RXF_STOPPED:
        bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
        call_rx_stall_cbfn(rx);
        bna_rx_enet_stop(rx);
        break;

    default:
        bfa_sm_fault(event);
        break;
    }
}
static void
bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
    switch (event) {
    case RX_E_FAIL:
    case RX_E_STOPPED:
        bfa_fsm_set_state(rx, bna_rx_sm_stopped);
        break;

    case RX_E_STARTED:
        bna_rx_enet_stop(rx);
        break;

    default:
        bfa_sm_fault(event);
    }
}
static void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
    struct bna_rxp *rxp;
    struct list_head *qe_rxp;
    int is_regular = (rx->type == BNA_RX_T_REGULAR);

    /* Start IB */
    list_for_each(qe_rxp, &rx->rxp_q) {
        rxp = (struct bna_rxp *)qe_rxp;
        bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
    }

    bna_ethport_cb_rx_started(&rx->bna->ethport);
}

static void
bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
{
    switch (event) {
    case RX_E_STOP:
        bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
        bna_ethport_cb_rx_stopped(&rx->bna->ethport);
        bna_rxf_stop(&rx->rxf);
        break;

    case RX_E_FAIL:
        bfa_fsm_set_state(rx, bna_rx_sm_failed);
        bna_ethport_cb_rx_stopped(&rx->bna->ethport);
        bna_rxf_fail(&rx->rxf);
        call_rx_stall_cbfn(rx);
        rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
        break;

    default:
        bfa_sm_fault(event);
        break;
    }
}
static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
                enum bna_rx_event event)
{
    switch (event) {
    case RX_E_STOP:
        bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
        break;

    case RX_E_FAIL:
        bfa_fsm_set_state(rx, bna_rx_sm_failed);
        bna_rxf_fail(&rx->rxf);
        call_rx_stall_cbfn(rx);
        rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
        break;

    case RX_E_RXF_STARTED:
        bfa_fsm_set_state(rx, bna_rx_sm_started);
        break;

    default:
        bfa_sm_fault(event);
        break;
    }
}
static void
bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
{
    switch (event) {
    case RX_E_FAIL:
    case RX_E_RXF_STOPPED:
        /* No-op */
        break;

    case RX_E_CLEANUP_DONE:
        bfa_fsm_set_state(rx, bna_rx_sm_stopped);
        break;

    default:
        bfa_sm_fault(event);
        break;
    }
}
static void
bna_rx_sm_failed_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
{
    switch (event) {
    case RX_E_START:
        bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
        break;

    case RX_E_STOP:
        bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
        break;

    case RX_E_FAIL:
    case RX_E_RXF_STARTED:
    case RX_E_RXF_STOPPED:
        /* No-op */
        break;

    case RX_E_CLEANUP_DONE:
        bfa_fsm_set_state(rx, bna_rx_sm_stopped);
        break;

    default:
        bfa_sm_fault(event);
        break;
    }
}
static void
bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
{
    switch (event) {
    case RX_E_STOP:
        bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
        break;

    case RX_E_FAIL:
        bfa_fsm_set_state(rx, bna_rx_sm_failed);
        break;

    case RX_E_CLEANUP_DONE:
        bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
        break;

    default:
        bfa_sm_fault(event);
        break;
    }
}
static void
bna_bfi_rx_enet_start(struct bna_rx *rx)
{
    struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
    struct bna_rxp *rxp = NULL;
    struct bna_rxq *q0 = NULL, *q1 = NULL;
    struct list_head *rxp_qe;
    int i;

    bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
        BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
    cfg_req->mh.num_entries = htons(
        bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));

    cfg_req->num_queue_sets = rx->num_paths;
    for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
        i < rx->num_paths;
        i++, rxp_qe = bfa_q_next(rxp_qe)) {
        rxp = (struct bna_rxp *)rxp_qe;

        GET_RXQS(rxp, q0, q1);
        switch (rxp->type) {
        case BNA_RXP_SLR:
        case BNA_RXP_HDS:
            /* Small RxQ */
            bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
                        &q1->qpt);
            cfg_req->q_cfg[i].qs.rx_buffer_size =
                htons((u16)q1->buffer_size);
            /* Fall through */

        case BNA_RXP_SINGLE:
            /* Large/Single RxQ */
            bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
                        &q0->qpt);
            q0->buffer_size =
                bna_enet_mtu_get(&rx->bna->enet);
            cfg_req->q_cfg[i].ql.rx_buffer_size =
                htons((u16)q0->buffer_size);
            break;

        default:
            break;
        }

        bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
                    &rxp->cq.qpt);

        cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
            rxp->cq.ib.ib_seg_host_addr.lsb;
        cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
            rxp->cq.ib.ib_seg_host_addr.msb;
        cfg_req->q_cfg[i].ib.intr.msix_index =
            htons((u16)rxp->cq.ib.intr_vector);
    }

    cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
    cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
    cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
    cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
    cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
                ? BNA_STATUS_T_ENABLED :
                BNA_STATUS_T_DISABLED;
    cfg_req->ib_cfg.coalescing_timeout =
            htonl((u32)rxp->cq.ib.coalescing_timeo);
    cfg_req->ib_cfg.inter_pkt_timeout =
            htonl((u32)rxp->cq.ib.interpkt_timeo);
    cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;

    switch (rxp->type) {
    case BNA_RXP_SLR:
        cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
        break;

    case BNA_RXP_HDS:
        cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
        cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
        cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
        cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
        break;

    case BNA_RXP_SINGLE:
        cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
        break;

    default:
        break;
    }
    cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;

    bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
        sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
    bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}
static void
bna_bfi_rx_enet_stop(struct bna_rx *rx)
{
    struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;

    bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
        BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
    req->mh.num_entries = htons(
        bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
    bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
        &req->mh);
    bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}
static void
bna_rx_enet_stop(struct bna_rx *rx)
{
    struct bna_rxp *rxp;
    struct list_head *qe_rxp;

    /* Stop IB */
    list_for_each(qe_rxp, &rx->rxp_q) {
        rxp = (struct bna_rxp *)qe_rxp;
        bna_ib_stop(rx->bna, &rxp->cq.ib);
    }

    bna_bfi_rx_enet_stop(rx);
}
static int
bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
{
    if ((rx_mod->rx_free_count == 0) ||
        (rx_mod->rxp_free_count == 0) ||
        (rx_mod->rxq_free_count == 0))
        return 0;

    if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
        if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
            (rx_mod->rxq_free_count < rx_cfg->num_paths))
            return 0;
    } else {
        if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
            (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
            return 0;
    }

    return 1;
}
static struct bna_rxq *
bna_rxq_get(struct bna_rx_mod *rx_mod)
{
    struct bna_rxq *rxq = NULL;
    struct list_head *qe = NULL;

    bfa_q_deq(&rx_mod->rxq_free_q, &qe);
    rx_mod->rxq_free_count--;
    rxq = (struct bna_rxq *)qe;
    bfa_q_qe_init(&rxq->qe);

    return rxq;
}

static void
bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
    bfa_q_qe_init(&rxq->qe);
    list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
    rx_mod->rxq_free_count++;
}
static struct bna_rxp *
bna_rxp_get(struct bna_rx_mod *rx_mod)
{
    struct list_head *qe = NULL;
    struct bna_rxp *rxp = NULL;

    bfa_q_deq(&rx_mod->rxp_free_q, &qe);
    rx_mod->rxp_free_count--;
    rxp = (struct bna_rxp *)qe;
    bfa_q_qe_init(&rxp->qe);

    return rxp;
}

static void
bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
    bfa_q_qe_init(&rxp->qe);
    list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
    rx_mod->rxp_free_count++;
}
static struct bna_rx *
bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
    struct list_head *qe = NULL;
    struct bna_rx *rx = NULL;

    if (type == BNA_RX_T_REGULAR)
        bfa_q_deq(&rx_mod->rx_free_q, &qe);
    else
        bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);

    rx_mod->rx_free_count--;
    rx = (struct bna_rx *)qe;
    bfa_q_qe_init(&rx->qe);
    list_add_tail(&rx->qe, &rx_mod->rx_active_q);
    rx->type = type;

    return rx;
}
static void
bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
    struct list_head *prev_qe = NULL;
    struct list_head *qe;

    bfa_q_qe_init(&rx->qe);

    list_for_each(qe, &rx_mod->rx_free_q) {
        if (((struct bna_rx *)qe)->rid < rx->rid)
            prev_qe = qe;
        else
            break;
    }

    if (prev_qe == NULL) {
        /* This is the first entry */
        bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
    } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
        /* This is the last entry */
        list_add_tail(&rx->qe, &rx_mod->rx_free_q);
    } else {
        /* Somewhere in the middle */
        bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
        bfa_q_prev(&rx->qe) = prev_qe;
        bfa_q_next(prev_qe) = &rx->qe;
        bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
    }

    rx_mod->rx_free_count++;
}
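/*
 * The free list is kept sorted by rid: bna_rx_get() above dequeues regular
 * RX objects from the head (lowest rid) and loopback RX objects from the
 * tail, so the ordered insertion here preserves that split.
 */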
static void
bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
        struct bna_rxq *q1)
{
    switch (rxp->type) {
    case BNA_RXP_SINGLE:
        rxp->rxq.single.only = q0;
        rxp->rxq.single.reserved = NULL;
        break;
    case BNA_RXP_SLR:
        rxp->rxq.slr.large = q0;
        rxp->rxq.slr.small = q1;
        break;
    case BNA_RXP_HDS:
        rxp->rxq.hds.data = q0;
        rxp->rxq.hds.hdr = q1;
        break;
    default:
        break;
    }
}
static void
bna_rxq_qpt_setup(struct bna_rxq *rxq,
        struct bna_rxp *rxp,
        u32 page_count,
        u32 page_size,
        struct bna_mem_descr *qpt_mem,
        struct bna_mem_descr *swqpt_mem,
        struct bna_mem_descr *page_mem)
{
    u8 *kva;
    u64 dma;
    struct bna_dma_addr bna_dma;
    int i;

    rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
    rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
    rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
    rxq->qpt.page_count = page_count;
    rxq->qpt.page_size = page_size;

    rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
    rxq->rcb->sw_q = page_mem->kva;

    kva = page_mem->kva;
    BNA_GET_DMA_ADDR(&page_mem->dma, dma);

    for (i = 0; i < rxq->qpt.page_count; i++) {
        rxq->rcb->sw_qpt[i] = kva;
        kva += PAGE_SIZE;

        BNA_SET_DMA_ADDR(dma, &bna_dma);
        ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
            bna_dma.lsb;
        ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
            bna_dma.msb;
        dma += PAGE_SIZE;
    }
}
static void
bna_rxp_cqpt_setup(struct bna_rxp *rxp,
        u32 page_count,
        u32 page_size,
        struct bna_mem_descr *qpt_mem,
        struct bna_mem_descr *swqpt_mem,
        struct bna_mem_descr *page_mem)
{
    u8 *kva;
    u64 dma;
    struct bna_dma_addr bna_dma;
    int i;

    rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
    rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
    rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
    rxp->cq.qpt.page_count = page_count;
    rxp->cq.qpt.page_size = page_size;

    rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
    rxp->cq.ccb->sw_q = page_mem->kva;

    kva = page_mem->kva;
    BNA_GET_DMA_ADDR(&page_mem->dma, dma);

    for (i = 0; i < rxp->cq.qpt.page_count; i++) {
        rxp->cq.ccb->sw_qpt[i] = kva;
        kva += PAGE_SIZE;

        BNA_SET_DMA_ADDR(dma, &bna_dma);
        ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
            bna_dma.lsb;
        ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
            bna_dma.msb;
        dma += PAGE_SIZE;
    }
}
static void
bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
{
    struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

    bfa_wc_down(&rx_mod->rx_stop_wc);
}

static void
bna_rx_mod_cb_rx_stopped_all(void *arg)
{
    struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

    if (rx_mod->stop_cbfn)
        rx_mod->stop_cbfn(&rx_mod->bna->enet);
    rx_mod->stop_cbfn = NULL;
}
static void
bna_rx_start(struct bna_rx *rx)
{
    rx->rx_flags |= BNA_RX_F_ENET_STARTED;
    if (rx->rx_flags & BNA_RX_F_ENABLED)
        bfa_fsm_send_event(rx, RX_E_START);
}

static void
bna_rx_stop(struct bna_rx *rx)
{
    rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
    if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
        bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
    else {
        rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
        rx->stop_cbarg = &rx->bna->rx_mod;
        bfa_fsm_send_event(rx, RX_E_STOP);
    }
}

static void
bna_rx_fail(struct bna_rx *rx)
{
    /* Indicate Enet is not enabled, and failed */
    rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
    bfa_fsm_send_event(rx, RX_E_FAIL);
}
void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
    struct bna_rx *rx;
    struct list_head *qe;

    rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
    if (type == BNA_RX_T_LOOPBACK)
        rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;

    list_for_each(qe, &rx_mod->rx_active_q) {
        rx = (struct bna_rx *)qe;
        if (rx->type == type)
            bna_rx_start(rx);
    }
}

void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
    struct bna_rx *rx;
    struct list_head *qe;

    rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
    rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

    rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;

    bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);

    list_for_each(qe, &rx_mod->rx_active_q) {
        rx = (struct bna_rx *)qe;
        if (rx->type == type) {
            bfa_wc_up(&rx_mod->rx_stop_wc);
            bna_rx_stop(rx);
        }
    }

    bfa_wc_wait(&rx_mod->rx_stop_wc);
}

void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
    struct bna_rx *rx;
    struct list_head *qe;

    rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
    rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

    list_for_each(qe, &rx_mod->rx_active_q) {
        rx = (struct bna_rx *)qe;
        bna_rx_fail(rx);
    }
}
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
            struct bna_res_info *res_info)
{
    int index;
    struct bna_rx *rx_ptr;
    struct bna_rxp *rxp_ptr;
    struct bna_rxq *rxq_ptr;

    rx_mod->bna = bna;
    rx_mod->flags = 0;

    rx_mod->rx = (struct bna_rx *)
        res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
    rx_mod->rxp = (struct bna_rxp *)
        res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
    rx_mod->rxq = (struct bna_rxq *)
        res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;

    /* Initialize the queues */
    INIT_LIST_HEAD(&rx_mod->rx_free_q);
    rx_mod->rx_free_count = 0;
    INIT_LIST_HEAD(&rx_mod->rxq_free_q);
    rx_mod->rxq_free_count = 0;
    INIT_LIST_HEAD(&rx_mod->rxp_free_q);
    rx_mod->rxp_free_count = 0;
    INIT_LIST_HEAD(&rx_mod->rx_active_q);

    /* Build RX queues */
    for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
        rx_ptr = &rx_mod->rx[index];

        bfa_q_qe_init(&rx_ptr->qe);
        INIT_LIST_HEAD(&rx_ptr->rxp_q);
        rx_ptr->bna = NULL;
        rx_ptr->rid = index;
        rx_ptr->stop_cbfn = NULL;
        rx_ptr->stop_cbarg = NULL;

        list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
        rx_mod->rx_free_count++;
    }

    /* build RX-path queue */
    for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
        rxp_ptr = &rx_mod->rxp[index];
        bfa_q_qe_init(&rxp_ptr->qe);
        list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
        rx_mod->rxp_free_count++;
    }

    /* build RXQ queue */
    for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
        rxq_ptr = &rx_mod->rxq[index];
        bfa_q_qe_init(&rxq_ptr->qe);
        list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
        rx_mod->rxq_free_count++;
    }
}
void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
    struct list_head *qe;
    int i;

    i = 0;
    list_for_each(qe, &rx_mod->rx_free_q)
        i++;

    i = 0;
    list_for_each(qe, &rx_mod->rxp_free_q)
        i++;

    i = 0;
    list_for_each(qe, &rx_mod->rxq_free_q)
        i++;

    rx_mod->bna = NULL;
}
void
bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
    struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
    struct bna_rxp *rxp = NULL;
    struct bna_rxq *q0 = NULL, *q1 = NULL;
    struct list_head *rxp_qe;
    int i;

    bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
        sizeof(struct bfi_enet_rx_cfg_rsp));

    rx->hw_id = cfg_rsp->hw_id;

    for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
        i < rx->num_paths;
        i++, rxp_qe = bfa_q_next(rxp_qe)) {
        rxp = (struct bna_rxp *)rxp_qe;
        GET_RXQS(rxp, q0, q1);

        /* Setup doorbells */
        rxp->cq.ccb->i_dbell->doorbell_addr =
            rx->bna->pcidev.pci_bar_kva
            + ntohl(cfg_rsp->q_handles[i].i_dbell);
        rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
        q0->rcb->q_dbell =
            rx->bna->pcidev.pci_bar_kva
            + ntohl(cfg_rsp->q_handles[i].ql_dbell);
        q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
        if (q1) {
            q1->rcb->q_dbell =
            rx->bna->pcidev.pci_bar_kva
            + ntohl(cfg_rsp->q_handles[i].qs_dbell);
            q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
        }

        /* Initialize producer/consumer indexes */
        (*rxp->cq.ccb->hw_producer_index) = 0;
        rxp->cq.ccb->producer_index = 0;
        q0->rcb->producer_index = q0->rcb->consumer_index = 0;
        if (q1)
            q1->rcb->producer_index = q1->rcb->consumer_index = 0;
    }

    bfa_fsm_send_event(rx, RX_E_STARTED);
}

void
bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
    bfa_fsm_send_event(rx, RX_E_STOPPED);
}
void
bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
{
    u32 cq_size, hq_size, dq_size;
    u32 cpage_count, hpage_count, dpage_count;
    struct bna_mem_info *mem_info;
    u32 cq_depth;
    u32 hq_depth;
    u32 dq_depth;

    dq_depth = q_cfg->q_depth;
    hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
    cq_depth = dq_depth + hq_depth;

    BNA_TO_POWER_OF_2_HIGH(cq_depth);
    cq_size = cq_depth * BFI_CQ_WI_SIZE;
    cq_size = ALIGN(cq_size, PAGE_SIZE);
    cpage_count = SIZE_TO_PAGES(cq_size);

    BNA_TO_POWER_OF_2_HIGH(dq_depth);
    dq_size = dq_depth * BFI_RXQ_WI_SIZE;
    dq_size = ALIGN(dq_size, PAGE_SIZE);
    dpage_count = SIZE_TO_PAGES(dq_size);

    if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
        BNA_TO_POWER_OF_2_HIGH(hq_depth);
        hq_size = hq_depth * BFI_RXQ_WI_SIZE;
        hq_size = ALIGN(hq_size, PAGE_SIZE);
        hpage_count = SIZE_TO_PAGES(hq_size);
    } else
        hpage_count = 0;
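    /*
     * Queue depths are rounded up to a power of two and queue memory to
     * whole pages; the *page_count values computed above drive the QPT
     * and page-memory resource sizes requested below.
     */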
    res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
    mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
    mem_info->mem_type = BNA_MEM_T_KVA;
    mem_info->len = sizeof(struct bna_ccb);
    mem_info->num = q_cfg->num_paths;

    res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
    mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
    mem_info->mem_type = BNA_MEM_T_KVA;
    mem_info->len = sizeof(struct bna_rcb);
    mem_info->num = BNA_GET_RXQS(q_cfg);

    res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
    mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
    mem_info->mem_type = BNA_MEM_T_DMA;
    mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
    mem_info->num = q_cfg->num_paths;

    res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
    mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
    mem_info->mem_type = BNA_MEM_T_KVA;
    mem_info->len = cpage_count * sizeof(void *);
    mem_info->num = q_cfg->num_paths;

    res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
    mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
    mem_info->mem_type = BNA_MEM_T_DMA;
    mem_info->len = PAGE_SIZE * cpage_count;
    mem_info->num = q_cfg->num_paths;

    res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
    mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
    mem_info->mem_type = BNA_MEM_T_DMA;
    mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
    mem_info->num = q_cfg->num_paths;

    res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
    mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
    mem_info->mem_type = BNA_MEM_T_KVA;
    mem_info->len = dpage_count * sizeof(void *);
    mem_info->num = q_cfg->num_paths;

    res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
    mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
    mem_info->mem_type = BNA_MEM_T_DMA;
    mem_info->len = PAGE_SIZE * dpage_count;
    mem_info->num = q_cfg->num_paths;

    res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
    mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
    mem_info->mem_type = BNA_MEM_T_DMA;
    mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
    mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

    res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
    mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
    mem_info->mem_type = BNA_MEM_T_KVA;
    mem_info->len = hpage_count * sizeof(void *);
    mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

    res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
    mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
    mem_info->mem_type = BNA_MEM_T_DMA;
    mem_info->len = PAGE_SIZE * hpage_count;
    mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

    res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
    mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
    mem_info->mem_type = BNA_MEM_T_DMA;
    mem_info->len = BFI_IBIDX_SIZE;
    mem_info->num = q_cfg->num_paths;

    res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
    mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
    mem_info->mem_type = BNA_MEM_T_KVA;
    mem_info->len = BFI_ENET_RSS_RIT_MAX;
    mem_info->num = 1;

    res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
    res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
    res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
}
2356 bna_rx_create(struct bna *bna, struct bnad *bnad,
2357 struct bna_rx_config *rx_cfg,
2358 const struct bna_rx_event_cbfn *rx_cbfn,
2359 struct bna_res_info *res_info,
2362 struct bna_rx_mod *rx_mod = &bna->rx_mod;
2364 struct bna_rxp *rxp;
2367 struct bna_intr_info *intr_info;
2369 struct bna_mem_descr *ccb_mem;
2370 struct bna_mem_descr *rcb_mem;
2371 struct bna_mem_descr *unmapq_mem;
2372 struct bna_mem_descr *cqpt_mem;
2373 struct bna_mem_descr *cswqpt_mem;
2374 struct bna_mem_descr *cpage_mem;
2375 struct bna_mem_descr *hqpt_mem;
2376 struct bna_mem_descr *dqpt_mem;
2377 struct bna_mem_descr *hsqpt_mem;
2378 struct bna_mem_descr *dsqpt_mem;
2379 struct bna_mem_descr *hpage_mem;
2380 struct bna_mem_descr *dpage_mem;
2382 int dpage_count, hpage_count, rcb_idx;
2384 if (!bna_rx_res_check(rx_mod, rx_cfg))
2387 intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2388 ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2389 rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2390 unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
2391 cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2392 cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2393 cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2394 hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2395 dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2396 hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2397 dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2398 hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2399 dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2401 page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
2404 dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
2407 hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
	rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
	rx->bna = bna;
	rx->rx_flags = 0;
	INIT_LIST_HEAD(&rx->rxp_q);
	rx->stop_cbfn = NULL;
	rx->stop_cbarg = NULL;
	rx->priv = priv;

	rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
	rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
	rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
	rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
	rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
	/* Following callbacks are mandatory */
	rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
	rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;

	if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
		switch (rx->type) {
		case BNA_RX_T_REGULAR:
			if (!(rx->bna->rx_mod.flags &
				BNA_RX_MOD_F_ENET_LOOPBACK))
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		case BNA_RX_T_LOOPBACK:
			if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		}
	}

	rx->num_paths = rx_cfg->num_paths;
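
	/*
	 * Per-path setup: each Rx path gets a completion queue with its
	 * own interrupt block, a large-buffer RxQ (q0) and, unless the
	 * path type is BNA_RXP_SINGLE, a small-buffer RxQ (q1).
	 */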
	for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
		rxp = bna_rxp_get(rx_mod);
		list_add_tail(&rxp->qe, &rx->rxp_q);
		rxp->type = rx_cfg->rxp_type;
		rxp->rx = rx;
		rxp->cq.rx = rx;

		q0 = bna_rxq_get(rx_mod);
		if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
			q1 = NULL;
		else
			q1 = bna_rxq_get(rx_mod);

		if (1 == intr_info->num)
			rxp->vector = intr_info->idl[0].vector;
		else
			rxp->vector = intr_info->idl[i].vector;

		/* Setup IB */

		rxp->cq.ib.ib_seg_host_addr.lsb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		rxp->cq.ib.ib_seg_host_addr.msb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		rxp->cq.ib.ib_seg_host_addr_kva =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		rxp->cq.ib.intr_type = intr_info->intr_type;
		if (intr_info->intr_type == BNA_INTR_T_MSIX)
			rxp->cq.ib.intr_vector = rxp->vector;
		else
			rxp->cq.ib.intr_vector = (1 << rxp->vector);
		rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
		rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
		rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
		bna_rxp_add_rxqs(rxp, q0, q1);

		/* Setup large Q */

		q0->rx = rx;
		q0->rxp = rxp;

		q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
		q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
		rcb_idx++;
		q0->rcb->q_depth = rx_cfg->q_depth;
		q0->rcb->rxq = q0;
		q0->rcb->bnad = bna->bnad;
		q0->rcb->id = 0;
		q0->rx_packets = q0->rx_bytes = 0;
		q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;

		bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
			&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);

		if (rx->rcb_setup_cbfn)
			rx->rcb_setup_cbfn(bnad, q0->rcb);

		/* Setup small Q */

		if (q1) {
			q1->rx = rx;
			q1->rxp = rxp;

			q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
			q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
			rcb_idx++;
			q1->rcb->q_depth = rx_cfg->q_depth;
			q1->rcb->rxq = q1;
			q1->rcb->bnad = bna->bnad;
			q1->rcb->id = 1;
			q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
					rx_cfg->hds_config.forced_offset
					: rx_cfg->small_buff_size;
			q1->rx_packets = q1->rx_bytes = 0;
			q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;

			bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
				&hqpt_mem[i], &hsqpt_mem[i],
				&hpage_mem[i]);

			if (rx->rcb_setup_cbfn)
				rx->rcb_setup_cbfn(bnad, q1->rcb);
		}
		/* Setup CQ */

		rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
		rxp->cq.ccb->q_depth = rx_cfg->q_depth +
			((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
			0 : rx_cfg->q_depth);
		rxp->cq.ccb->cq = &rxp->cq;
		rxp->cq.ccb->rcb[0] = q0->rcb;
		q0->rcb->ccb = rxp->cq.ccb;
		if (q1) {
			rxp->cq.ccb->rcb[1] = q1->rcb;
			q1->rcb->ccb = rxp->cq.ccb;
		}
		rxp->cq.ccb->hw_producer_index =
			(u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
		rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
		rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
		rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
		rxp->cq.ccb->rx_coalescing_timeo =
			rxp->cq.ib.coalescing_timeo;
		rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
		rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
		rxp->cq.ccb->bnad = bna->bnad;
		rxp->cq.ccb->id = i;

		bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
			&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);

		if (rx->ccb_setup_cbfn)
			rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
	}

	rx->hds_cfg = rx_cfg->hds_config;

	bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);

	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

	rx_mod->rid_mask |= (1 << rx->rid);

	return rx;
}
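
/*
 * Tear down an Rx object: uninitialize the Rx function, hand every RxQ
 * and Rx path back to the module free lists, and release the Rx itself.
 */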
void
bna_rx_destroy(struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	struct bna_rxp *rxp;
	struct list_head *qe;

	bna_rxf_uninit(&rx->rxf);

	while (!list_empty(&rx->rxp_q)) {
		bfa_q_deq(&rx->rxp_q, &rxp);
		GET_RXQS(rxp, q0, q1);
		if (rx->rcb_destroy_cbfn)
			rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
		q0->rcb = NULL;
		q0->rxp = NULL;
		q0->rx = NULL;
		bna_rxq_put(rx_mod, q0);

		if (q1) {
			if (rx->rcb_destroy_cbfn)
				rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
			q1->rcb = NULL;
			q1->rxp = NULL;
			q1->rx = NULL;
			bna_rxq_put(rx_mod, q1);
		}
		rxp->rxq.slr.large = NULL;
		rxp->rxq.slr.small = NULL;

		if (rx->ccb_destroy_cbfn)
			rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
		rxp->cq.ccb = NULL;
		rxp->rx = NULL;
		bna_rxp_put(rx_mod, rxp);
	}

	list_for_each(qe, &rx_mod->rx_active_q) {
		if (qe == &rx->qe) {
			list_del(&rx->qe);
			bfa_q_qe_init(&rx->qe);
			break;
		}
	}

	rx_mod->rid_mask &= ~(1 << rx->rid);

	rx->bna = NULL;
	rx->priv = NULL;
	bna_rx_put(rx_mod, rx);
}
void
bna_rx_enable(struct bna_rx *rx)
{
	if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
		return;

	rx->rx_flags |= BNA_RX_F_ENABLED;
	if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
		bfa_fsm_send_event(rx, RX_E_START);
}

void
bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_rx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		/* h/w should not be accessed. Treat as already stopped */
		(*cbfn)(rx->bna->bnad, rx);
	} else {
		rx->stop_cbfn = cbfn;
		rx->stop_cbarg = rx->bna->bnad;

		rx->rx_flags &= ~BNA_RX_F_ENABLED;

		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

void
bna_rx_cleanup_complete(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
}
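
/*
 * Apply promiscuous/default/allmulti mode changes. Only one Rx in the
 * system may own promiscuous or default mode at a time, and the two are
 * mutually exclusive; violations fail with BNA_CB_FAIL before anything
 * is committed to hardware.
 */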
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
		enum bna_rxmode bitmask,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	int need_hw_config = 0;

	/* Error checks */

	if (is_promisc_enable(new_mode, bitmask)) {
		/* If promisc mode is already enabled elsewhere in the system */
		if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
			(rx->bna->promisc_rid != rxf->rx->rid))
			goto err_return;

		/* If default mode is already enabled in the system */
		if (rx->bna->default_mode_rid != BFI_INVALID_RID)
			goto err_return;

		/* Trying to enable promiscuous and default mode together */
		if (is_default_enable(new_mode, bitmask))
			goto err_return;
	}

	if (is_default_enable(new_mode, bitmask)) {
		/* If default mode is already enabled elsewhere in the system */
		if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
			(rx->bna->default_mode_rid != rxf->rx->rid)) {
			goto err_return;
		}

		/* If promiscuous mode is already enabled in the system */
		if (rx->bna->promisc_rid != BFI_INVALID_RID)
			goto err_return;
	}

	/* Process the commands */

	if (is_promisc_enable(new_mode, bitmask)) {
		if (bna_rxf_promisc_enable(rxf))
			need_hw_config = 1;
	} else if (is_promisc_disable(new_mode, bitmask)) {
		if (bna_rxf_promisc_disable(rxf))
			need_hw_config = 1;
	}

	if (is_allmulti_enable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_enable(rxf))
			need_hw_config = 1;
	} else if (is_allmulti_disable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_disable(rxf))
			need_hw_config = 1;
	}

	/* Trigger h/w if needed */

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	} else
		(*cbfn)(rx->bna->bnad, rx);

	return BNA_CB_SUCCESS;

err_return:
	return BNA_CB_FAIL;
}
void
bna_rx_vlanfilter_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
		rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
		bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
	}
}
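
/*
 * Dynamic interrupt moderation (DIM): the dim_vector table maps a
 * packet-rate load level and a small/large packet bias to a coalescing
 * timeout, so each CQ's interrupt timeout tracks its observed traffic
 * profile.
 */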
void
bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
{
	int i, j;

	for (i = 0; i < BNA_LOAD_T_MAX; i++)
		for (j = 0; j < BNA_BIAS_T_MAX; j++)
			bna->rx_mod.dim_vector[i][j] = vector[i][j];
}

void
bna_rx_dim_update(struct bna_ccb *ccb)
{
	struct bna *bna = ccb->cq->rx->bna;
	u32 load, bias;
	u32 pkt_rt, small_rt, large_rt;
	u8 coalescing_timeo;

	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
		(ccb->pkt_rate.large_pkt_cnt == 0))
		return;

	/* Arrive at preconfigured coalescing timeo value based on pkt rate */

	small_rt = ccb->pkt_rate.small_pkt_cnt;
	large_rt = ccb->pkt_rate.large_pkt_cnt;

	pkt_rt = small_rt + large_rt;

	if (pkt_rt < BNA_PKT_RATE_10K)
		load = BNA_LOAD_T_LOW_4;
	else if (pkt_rt < BNA_PKT_RATE_20K)
		load = BNA_LOAD_T_LOW_3;
	else if (pkt_rt < BNA_PKT_RATE_30K)
		load = BNA_LOAD_T_LOW_2;
	else if (pkt_rt < BNA_PKT_RATE_40K)
		load = BNA_LOAD_T_LOW_1;
	else if (pkt_rt < BNA_PKT_RATE_50K)
		load = BNA_LOAD_T_HIGH_1;
	else if (pkt_rt < BNA_PKT_RATE_60K)
		load = BNA_LOAD_T_HIGH_2;
	else if (pkt_rt < BNA_PKT_RATE_80K)
		load = BNA_LOAD_T_HIGH_3;
	else
		load = BNA_LOAD_T_HIGH_4;

	if (small_rt > (large_rt << 1))
		bias = 0;
	else
		bias = 1;

	ccb->pkt_rate.small_pkt_cnt = 0;
	ccb->pkt_rate.large_pkt_cnt = 0;

	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
	ccb->rx_coalescing_timeo = coalescing_timeo;

	/* Set it to IB */
	bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
}
/* One {load, bias} -> timeout row per load level, lowest load first */
const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
	{12, 12},
	{6, 10},
	{5, 10},
	{4, 8},
	{3, 6},
	{3, 6},
	{2, 4},
	{1, 2},
};

/* TX */
#define call_tx_stop_cbfn(tx)						\
do {									\
	if ((tx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_tx *);			\
		void *cbarg;						\
		cbfn = (tx)->stop_cbfn;					\
		cbarg = (tx)->stop_cbarg;				\
		(tx)->stop_cbfn = NULL;					\
		(tx)->stop_cbarg = NULL;				\
		cbfn(cbarg, (tx));					\
	}								\
} while (0)

#define call_tx_prio_change_cbfn(tx)					\
do {									\
	if ((tx)->prio_change_cbfn) {					\
		void (*cbfn)(struct bnad *, struct bna_tx *);		\
		cbfn = (tx)->prio_change_cbfn;				\
		(tx)->prio_change_cbfn = NULL;				\
		cbfn((tx)->bna->bnad, (tx));				\
	}								\
} while (0)
static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
static void bna_bfi_tx_enet_start(struct bna_tx *tx);
static void bna_tx_enet_stop(struct bna_tx *tx);

enum bna_tx_event {
	TX_E_START			= 1,
	TX_E_STOP			= 2,
	TX_E_FAIL			= 3,
	TX_E_STARTED			= 4,
	TX_E_STOPPED			= 5,
	TX_E_PRIO_CHANGE		= 6,
	TX_E_CLEANUP_DONE		= 7,
	TX_E_BW_UPDATE			= 8,
};
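
/*
 * Tx state machine: a Tx object moves from stopped through start_wait
 * to started, detours through prio_stop_wait/prio_cleanup_wait when
 * priority or bandwidth changes, and through failed/quiesce_wait after
 * an IOC failure. Unlisted events in a state are treated as faults.
 */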
bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
			enum bna_tx_event);
static void
bna_tx_sm_stopped_entry(struct bna_tx *tx)
{
	call_tx_stop_cbfn(tx);
}

static void
bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	case TX_E_STOP:
		call_tx_stop_cbfn(tx);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_PRIO_CHANGE:
		call_tx_prio_change_cbfn(tx);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_start_wait_entry(struct bna_tx *tx)
{
	bna_bfi_tx_enet_start(tx);
}

static void
bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;

	case TX_E_FAIL:
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_STARTED:
		if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
			tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
				BNA_TX_F_BW_UPDATED);
			bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		} else
			bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;

	case TX_E_PRIO_CHANGE:
		tx->flags |= BNA_TX_F_PRIO_CHANGED;
		break;

	case TX_E_BW_UPDATE:
		tx->flags |= BNA_TX_F_BW_UPDATED;
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;
	int is_regular = (tx->type == BNA_TX_T_REGULAR);

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb->priority = txq->priority;
		/* Start IB */
		bna_ib_start(tx->bna, &txq->ib, is_regular);
	}
	tx->tx_resume_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		bna_tx_enet_stop(tx);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_STARTED:
		/*
		 * We are here due to start_wait -> stop_wait transition on
		 * TX_E_STOP event
		 */
		bna_tx_enet_stop(tx);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
{
	tx->tx_stall_cbfn(tx->bna->bnad, tx);
	bna_tx_enet_stop(tx);
}

static void
bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		call_tx_prio_change_cbfn(tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
{
	call_tx_prio_change_cbfn(tx);
	tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_failed_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
		break;

	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
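
/*
 * Build a BFI_ENET_H2I_TX_CFG_SET_REQ message describing every TxQ
 * (queue page table, priority, IB doorbell and coalescing settings)
 * and post it to the firmware message queue.
 */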
static void
bna_bfi_tx_enet_start(struct bna_tx *tx)
{
	struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));

	cfg_req->num_queues = tx->num_txq;
	for (i = 0, qe = bfa_q_first(&tx->txq_q);
		i < tx->num_txq;
		i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
		cfg_req->q_cfg[i].q.priority = txq->priority;

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			txq->ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			txq->ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)txq->ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
	/* All TxQs share one intr type/timeout; the last txq is as good as any */
	cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)txq->ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)txq->ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;

	cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
	cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
	cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED;
	cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}
static void
bna_bfi_tx_enet_stop(struct bna_tx *tx)
{
	struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}

static void
bna_tx_enet_stop(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	/* Stop IB */
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_stop(tx->bna, &txq->ib);
	}

	bna_bfi_tx_enet_stop(tx);
}
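
/*
 * Populate a TxQ's queue page table: the hardware QPT holds the DMA
 * address of each queue page, while the shadow sw_qpt keeps the kernel
 * virtual address of the same page for the driver's own accesses.
 */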
static void
bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	txq->qpt.kv_qpt_ptr = qpt_mem->kva;
	txq->qpt.page_count = page_count;
	txq->qpt.page_size = page_size;

	txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
	txq->tcb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < page_count; i++) {
		txq->tcb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}
static struct bna_tx *
bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct list_head *qe = NULL;
	struct bna_tx *tx = NULL;

	if (list_empty(&tx_mod->tx_free_q))
		return NULL;
	if (type == BNA_TX_T_REGULAR) {
		bfa_q_deq(&tx_mod->tx_free_q, &qe);
	} else {
		bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
	}
	tx = (struct bna_tx *)qe;
	bfa_q_qe_init(&tx->qe);

	return tx;
}
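
/*
 * Return a Tx object to the module: release its TxQs, unlink it from
 * the active list, and re-insert it into tx_free_q keeping that list
 * sorted by rid (regular Tx is allocated from the head, loopback from
 * the tail, so ordering matters).
 */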
static void
bna_tx_free(struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
	struct bna_txq *txq;
	struct list_head *prev_qe;
	struct list_head *qe;

	while (!list_empty(&tx->txq_q)) {
		bfa_q_deq(&tx->txq_q, &txq);
		bfa_q_qe_init(&txq->qe);
		txq->tcb = NULL;
		txq->tx = NULL;
		list_add_tail(&txq->qe, &tx_mod->txq_free_q);
	}

	list_for_each(qe, &tx_mod->tx_active_q) {
		if (qe == &tx->qe) {
			list_del(&tx->qe);
			bfa_q_qe_init(&tx->qe);
			break;
		}
	}

	tx->bna = NULL;
	tx->priv = NULL;

	prev_qe = NULL;
	list_for_each(qe, &tx_mod->tx_free_q) {
		if (((struct bna_tx *)qe)->rid < tx->rid)
			prev_qe = qe;
		else {
			break;
		}
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
	} else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
		/* This is the last entry */
		list_add_tail(&tx->qe, &tx_mod->tx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&tx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &tx->qe;
		bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
	}
}
static void
bna_tx_start(struct bna_tx *tx)
{
	tx->flags |= BNA_TX_F_ENET_STARTED;
	if (tx->flags & BNA_TX_F_ENABLED)
		bfa_fsm_send_event(tx, TX_E_START);
}

static void
bna_tx_stop(struct bna_tx *tx)
{
	tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
	tx->stop_cbarg = &tx->bna->tx_mod;

	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_STOP);
}

static void
bna_tx_fail(struct bna_tx *tx)
{
	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_FAIL);
}
void
bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_tx_cfg_rsp));

	tx->hw_id = cfg_rsp->hw_id;

	for (i = 0, qe = bfa_q_first(&tx->txq_q);
		i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		/* Setup doorbells */
		txq->tcb->i_dbell->doorbell_addr =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		txq->tcb->q_dbell =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].q_dbell);
		txq->hw_id = cfg_rsp->q_handles[i].hw_qid;

		/* Initialize producer/consumer indexes */
		(*txq->tcb->hw_consumer_index) = 0;
		txq->tcb->producer_index = txq->tcb->consumer_index = 0;
	}

	bfa_fsm_send_event(tx, TX_E_STARTED);
}

void
bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(tx, TX_E_STOPPED);
}
void
bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
	}
}
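
/*
 * Compute the memory and interrupt resources a Tx object needs. Each
 * TxQ is sized as txq_depth work items rounded up to whole pages; as an
 * illustration (numbers are not from this file), with 64-byte work
 * items and 4 KiB pages a depth-2048 TxQ needs 2048 * 64 / 4096 = 32
 * queue pages, plus one QPT entry and one sw_qpt pointer per page.
 */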
void
bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
{
	u32 q_size;
	u32 page_count;
	struct bna_mem_info *mem_info;

	res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_tcb);
	mem_info->num = num_txq;

	q_size = txq_depth * BFI_TXQ_WI_SIZE;
	q_size = ALIGN(q_size, PAGE_SIZE);
	page_count = q_size >> PAGE_SHIFT;

	res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = page_count * sizeof(struct bna_dma_addr);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = page_count * sizeof(void *);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * page_count;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
		BNA_INTR_T_MSIX;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
}
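
/*
 * Assemble a Tx object from reserved resources: pull TxQs off the free
 * list, attach TCBs, interrupt blocks and queue page tables, assign
 * per-queue priorities, and leave the Tx in the stopped state.
 */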
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
		struct bna_tx_config *tx_cfg,
		const struct bna_tx_event_cbfn *tx_cbfn,
		struct bna_res_info *res_info, void *priv)
{
	struct bna_intr_info *intr_info;
	struct bna_tx_mod *tx_mod = &bna->tx_mod;
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct list_head *qe;
	int page_count;
	int i;

	intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
			PAGE_SIZE;

	if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
		return NULL;

	/* Tx */

	tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
	if (!tx)
		return NULL;
	tx->bna = bna;
	tx->priv = priv;

	/* TxQs */

	INIT_LIST_HEAD(&tx->txq_q);
	for (i = 0; i < tx_cfg->num_txq; i++) {
		if (list_empty(&tx_mod->txq_free_q))
			goto err_return;

		bfa_q_deq(&tx_mod->txq_free_q, &txq);
		bfa_q_qe_init(&txq->qe);
		list_add_tail(&txq->qe, &tx->txq_q);
		txq->tx = tx;
	}

	/*
	 * Initialize
	 */

	/* Tx */

	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
	tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
	/* Following callbacks are mandatory */
	tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
	tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
	tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;

	list_add_tail(&tx->qe, &tx_mod->tx_active_q);

	tx->num_txq = tx_cfg->num_txq;

	tx->flags = 0;
	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
		switch (tx->type) {
		case BNA_TX_T_REGULAR:
			if (!(tx->bna->tx_mod.flags &
				BNA_TX_MOD_F_ENET_LOOPBACK))
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		case BNA_TX_T_LOOPBACK:
			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		}
	}
	/* TxQ */

	i = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb = (struct bna_tcb *)
		res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
		txq->tx_packets = 0;
		txq->tx_bytes = 0;

		/* IB */
		txq->ib.ib_seg_host_addr.lsb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		txq->ib.ib_seg_host_addr.msb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		txq->ib.ib_seg_host_addr_kva =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		txq->ib.intr_type = intr_info->intr_type;
		txq->ib.intr_vector = (intr_info->num == 1) ?
					intr_info->idl[0].vector :
					intr_info->idl[i].vector;
		if (intr_info->intr_type == BNA_INTR_T_INTX)
			txq->ib.intr_vector = (1 << txq->ib.intr_vector);
		txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
		txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
		txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;

		/* TCB */

		txq->tcb->q_depth = tx_cfg->txq_depth;
		txq->tcb->unmap_q = (void *)
		res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
		txq->tcb->hw_consumer_index =
			(u32 *)txq->ib.ib_seg_host_addr_kva;
		txq->tcb->i_dbell = &txq->ib.door_bell;
		txq->tcb->intr_type = txq->ib.intr_type;
		txq->tcb->intr_vector = txq->ib.intr_vector;
		txq->tcb->txq = txq;
		txq->tcb->bnad = bnad;
		txq->tcb->id = i;

		/* QPT, SWQPT, Pages */
		bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
			&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_PAGE].
				res_u.mem_info.mdl[i]);

		/* Callback to bnad for setting up TCB */
		if (tx->tcb_setup_cbfn)
			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);

		if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
			txq->priority = txq->tcb->id;
		else
			txq->priority = tx_mod->default_prio;

		i++;
	}

	tx->txf_vlan_id = 0;

	bfa_fsm_set_state(tx, bna_tx_sm_stopped);

	tx_mod->rid_mask |= (1 << tx->rid);

	return tx;

err_return:
	bna_tx_free(tx);
	return NULL;
}
void
bna_tx_destroy(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		if (tx->tcb_destroy_cbfn)
			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
	}

	tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
	bna_tx_free(tx);
}

void
bna_tx_enable(struct bna_tx *tx)
{
	if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
		return;

	tx->flags |= BNA_TX_F_ENABLED;

	if (tx->flags & BNA_TX_F_ENET_STARTED)
		bfa_fsm_send_event(tx, TX_E_START);
}
void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_tx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(tx->bna->bnad, tx);
		return;
	}

	tx->stop_cbfn = cbfn;
	tx->stop_cbarg = tx->bna->bnad;

	tx->flags &= ~BNA_TX_F_ENABLED;

	bfa_fsm_send_event(tx, TX_E_STOP);
}

void
bna_tx_cleanup_complete(struct bna_tx *tx)
{
	bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
}
static void
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	bfa_wc_down(&tx_mod->tx_stop_wc);
}

static void
bna_tx_mod_cb_tx_stopped_all(void *arg)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	if (tx_mod->stop_cbfn)
		tx_mod->stop_cbfn(&tx_mod->bna->enet);
	tx_mod->stop_cbfn = NULL;
}
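
/*
 * Tx module: owns the pools of Tx and TxQ objects. rid_mask tracks
 * which Tx ids are in use; prio_map, default_prio and the iscsi_*
 * fields carry the CEE/iSCSI priority configuration down to newly
 * created TxQs.
 */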
void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	tx_mod->bna = bna;
	tx_mod->flags = 0;

	tx_mod->tx = (struct bna_tx *)
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
	tx_mod->txq = (struct bna_txq *)
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&tx_mod->tx_free_q);
	INIT_LIST_HEAD(&tx_mod->tx_active_q);

	INIT_LIST_HEAD(&tx_mod->txq_free_q);

	for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
		tx_mod->tx[i].rid = i;
		bfa_q_qe_init(&tx_mod->tx[i].qe);
		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
		bfa_q_qe_init(&tx_mod->txq[i].qe);
		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
	}

	tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
	tx_mod->default_prio = 0;
	tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
	tx_mod->iscsi_prio = -1;
}
void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &tx_mod->tx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &tx_mod->txq_free_q)
		i++;

	tx_mod->bna = NULL;
}
void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
	if (type == BNA_TX_T_LOOPBACK)
		tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_start(tx);
	}
}

void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;

	bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type) {
			bfa_wc_up(&tx_mod->tx_stop_wc);
			bna_tx_stop(tx);
		}
	}

	bfa_wc_wait(&tx_mod->tx_stop_wc);
}
void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_fail(tx);
	}
}

void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
	}
}