****************************************************************************
*/
-#define BFI_SGE_INLINE 1
-#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1)
-
-/**
- * SG Flags
- */
-enum {
- BFI_SGE_DATA = 0, /*!< data address, not last */
- BFI_SGE_DATA_CPL = 1, /*!< data addr, last in current page */
- BFI_SGE_DATA_LAST = 3, /*!< data address, last */
- BFI_SGE_LINK = 2, /*!< link address */
- BFI_SGE_PGDLEN = 2, /*!< cumulative data length for page */
-};
-
/**
* DMA addresses
*/
} a32;
};
-/**
- * Scatter Gather Element
- */
-struct bfi_sge {
-#ifdef __BIGENDIAN
- u32 flags:2,
- rsvd:2,
- sg_len:28;
-#else
- u32 sg_len:28,
- rsvd:2,
- flags:2;
-#endif
- union bfi_addr_u sga;
-};
-
-/**
- * Scatter Gather Page
- */
-#define BFI_SGPG_DATA_SGES 7
-#define BFI_SGPG_SGES_MAX (BFI_SGPG_DATA_SGES + 1)
-#define BFI_SGPG_RSVD_WD_LEN 8
-struct bfi_sgpg {
- struct bfi_sge sges[BFI_SGPG_SGES_MAX];
- u32 rsvd[BFI_SGPG_RSVD_WD_LEN];
-};
-
/*
* Large Message structure - 128 Bytes size Msgs
*/
#define BFI_LMSG_PL_WSZ \
((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
-struct bfi_msg {
- struct bfi_mhdr mhdr;
- u32 pl[BFI_LMSG_PL_WSZ];
-};
-
/**
* Mailbox message structure
*/
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
#ifndef __BNA_H__
#define __BNA_H__
-#include "bfa_cs.h"
+#include "bfa_defs.h"
#include "bfa_ioc.h"
-#include "cna.h"
+#include "bfi_enet.h"
#include "bna_types.h"
extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
void bna_uninit(struct bna *bna);
int bna_num_txq_set(struct bna *bna, int num_txq);
int bna_num_rxp_set(struct bna *bna, int num_rxp);
-void bna_stats_get(struct bna *bna);
-void bna_get_perm_mac(struct bna *bna, u8 *mac);
void bna_hw_stats_get(struct bna *bna);
-/* APIs for Rx */
-
/* APIs for RxF */
struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
-void bna_rx_hds_enable(struct bna_rx *rx, struct bna_hds_config *hds_config,
- void (*cbfn)(struct bnad *, struct bna_rx *));
-void bna_rx_hds_disable(struct bna_rx *rx,
- void (*cbfn)(struct bnad *, struct bna_rx *));
-
/**
* ENET
*/
BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
while (to_alloc--) {
- if (!wi_range) {
+ if (!wi_range)
BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
wi_range);
- }
skb = netdev_alloc_skb_ip_align(bnad->netdev,
rcb->rxq->buffer_size);
if (unlikely(!skb)) {
}
static void
-bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
-{
- if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
- return;
-
- bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
- bna_ib_ack(ccb->i_dbell, 0);
-}
-
-static void
-bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
-{
- unsigned long flags;
-
- /* Because of polling context */
- spin_lock_irqsave(&bnad->bna_lock, flags);
- bnad_enable_rx_irq_unsafe(ccb);
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
-}
-
-static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
return rcvd;
poll_exit:
- napi_complete((napi));
+ napi_complete(napi);
rx_ctrl->rx_complete++;
return 0;
}
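/*
 * Illustrative sketch, not part of the patch: the NAPI contract that the
 * poll handler above follows.  A poll routine keeps returning the full
 * budget while work remains and only calls napi_complete() once the ring
 * is drained.  example_napi_poll() and example_clean_rx() are hypothetical
 * names; napi_complete() and struct napi_struct come from
 * <linux/netdevice.h>.
 */
static int example_clean_rx(struct napi_struct *napi, int budget);

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int rcvd = example_clean_rx(napi, budget);	/* frames processed */

	if (rcvd >= budget)
		return budget;			/* more work; stay scheduled */

	napi_complete(napi);			/* done; re-arm the interrupt */
	return rcvd;
}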
-/* Called with bnad_conf_lock() held */
+/* Called with mutex_lock(&bnad->conf_mutex) held */
static void
bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
{
u16 vid;
unsigned long flags;
- BUG_ON(!(VLAN_N_VID == BFI_ENET_VLAN_ID_MAX));
-
for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
{
int err;
- /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 is defined since 2.6.18. */
- BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
- skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err) {
} else {
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
- BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
ipv6h->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
if (ret > 0) {
/* Not enough MSI-X vectors. */
+ pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
+ ret, bnad->msix_num);
spin_lock_irqsave(&bnad->bna_lock, flags);
/* ret = #of vectors that we got */
return;
intx_mode:
+ pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
kfree(bnad->msix_table);
bnad->msix_table = NULL;
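/*
 * Illustrative sketch, not part of the patch: the pci_enable_msix() retry
 * pattern the code above relies on.  In this kernel era pci_enable_msix()
 * returns 0 on success, a positive count of vectors actually available if
 * the request cannot be met in full, or a negative errno.
 * example_setup_msix() is a hypothetical helper.
 */
static int example_setup_msix(struct pci_dev *pdev,
			      struct msix_entry *entries, int want)
{
	int ret = pci_enable_msix(pdev, entries, want);

	if (ret > 0)		/* only 'ret' vectors available: retry smaller */
		ret = pci_enable_msix(pdev, entries, ret);

	return ret;		/* 0 on success; negative errno -> fall back to INTx */
}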
/*
* Takes care of the Tx that is scheduled between clearing the flag
- * and the netif_stop_all_queue() call.
+ * and the netif_tx_stop_all_queues() call.
*/
if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
dev_kfree_skb(skb);
txq_prod = tcb->producer_index;
BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
- BUG_ON(!(wi_range <= tcb->q_depth));
txqent->hdr.wi.reserved = 0;
txqent->hdr.wi.num_vectors = vectors;
bnad_isr(bnad->pcidev->irq, netdev);
bna_intx_enable(&bnad->bna, curr_mask);
} else {
+ /*
+ * Tx processing may happen in sending context, so no need
+ * to explicitly process completions here
+ */
+
+ /* Rx processing */
for (i = 0; i < bnad->num_rx; i++) {
rx_info = &bnad->rx_info[i];
if (!rx_info->rx)
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
+#include <linux/if_vlan.h>
#include <linux/if_ether.h>
-#include <asm/page.h>
-#include <asm/io.h>
-#include <asm/string.h>
-
-#include <linux/list.h>
#define bfa_sm_fault(__event) do { \
- pr_err("SM Assertion failure: %s: %d: event = %d", __FILE__, __LINE__, \
- __event); \
+ pr_err("SM Assertion failure: %s: %d: event = %d\n", \
+ __FILE__, __LINE__, __event); \
} while (0)
extern char bfa_version[];
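/*
 * Illustrative sketch, not part of the patch: how a state-machine handler
 * would typically invoke bfa_sm_fault() above for an unexpected event.
 * The enum and handler names below are hypothetical.
 */
enum example_event { EXAMPLE_EVT_START = 1, EXAMPLE_EVT_STOP };

static void example_sm_stopped(void *obj, enum example_event event)
{
	switch (event) {
	case EXAMPLE_EVT_START:
		/* normal transition would go here */
		break;
	default:
		bfa_sm_fault(event);	/* logs file, line and event code */
	}
}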