} \
} while (0)
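+/* Invoke the driver's Rx stall callback, if one was registered */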
+#define call_rx_stall_cbfn(rx) \
+do { \
+ if ((rx)->rx_stall_cbfn) \
+ (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \
+} while (0)
+
#define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \
do { \
struct bna_dma_addr cur_q_addr = \
case RX_E_FAIL:
bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
bna_rxf_fail(&rx->rxf);
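+ /* Stall buffer posting before the cleanup callback runs */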
+ call_rx_stall_cbfn(rx);
rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
break;
case RX_E_RXF_STOPPED:
bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
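+ /* Stall posting before initiating the Rx enet stop */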
+ call_rx_stall_cbfn(rx);
bna_rx_enet_stop(rx);
break;
bfa_fsm_set_state(rx, bna_rx_sm_failed);
bna_ethport_cb_rx_stopped(&rx->bna->ethport);
bna_rxf_fail(&rx->rxf);
+ call_rx_stall_cbfn(rx);
rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
break;
case RX_E_FAIL:
bfa_fsm_set_state(rx, bna_rx_sm_failed);
bna_rxf_fail(&rx->rxf);
+ call_rx_stall_cbfn(rx);
rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
break;
rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
+ rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
/* Following callbacks are mandatory */
rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *);
void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *);
void (*rx_post_cbfn)(struct bnad *, struct bna_rx *);
void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
+ void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *);
/* Mandatory */
void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *);
void (*rx_post_cbfn)(struct bnad *, struct bna_rx *);
unmap_q->producer_index = unmap_prod;
rcb->producer_index = unmap_prod;
smp_mb();
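+ /*
+ * Ring the doorbell only while posting is allowed; BNAD_RXQ_POST_OK
+ * is cleared by the Rx stall callback and set again when the RCB
+ * is (re)started.
+ */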
- if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
+ if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
bna_rxq_prod_indx_doorbell(rcb);
}
}
}
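+/*
+ * Rx stall callback: invoked by the BNA Rx state machine when buffer
+ * posting must stop. Clears BNAD_RXQ_POST_OK on each RCB so the refill
+ * path stops ringing the RxQ doorbell until the Rx is restarted.
+ */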
static void
+bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
+{
+ struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
+ struct bna_ccb *ccb;
+ struct bnad_rx_ctrl *rx_ctrl;
+ int i;
+
+ for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
+ rx_ctrl = &rx_info->rx_ctrl[i];
+ ccb = rx_ctrl->ccb;
+ if (!ccb)
+ continue;
+
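+ /* rcb[0] always exists; rcb[1] only if a second RxQ is in use */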
+ clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
+
+ if (ccb->rcb[1])
+ clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
+ }
+}
+
+static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
bnad_free_all_rxbufs(bnad, rcb);
set_bit(BNAD_RXQ_STARTED, &rcb->flags);
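+ /* Posting is permitted again; this bit is cleared on Rx stall */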
+ set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
unmap_q = rcb->unmap_q;
/* Now allocate & post buffers for this RCB */
.rcb_destroy_cbfn = bnad_cb_rcb_destroy,
.ccb_setup_cbfn = bnad_cb_ccb_setup,
.ccb_destroy_cbfn = bnad_cb_ccb_destroy,
+ .rx_stall_cbfn = bnad_cb_rx_stall,
.rx_cleanup_cbfn = bnad_cb_rx_cleanup,
.rx_post_cbfn = bnad_cb_rx_post,
};
/* Bit positions for rcb->flags */
#define BNAD_RXQ_REFILL 0
#define BNAD_RXQ_STARTED 1
+#define BNAD_RXQ_POST_OK 2 /* ok to post buffers and ring the doorbell */
/* Resource limits */
#define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx)