#if 0
if (dev->flags & IFF_PROMISC)
return;
- else if (dev->mc_count)
+ else if (!netdev_mc_empty(dev))
dev->flags |= IFF_ALLMULTI;
else
dev->flags &= ~IFF_ALLMULTI;
}
nes_debug(NES_DBG_NIC_RX, "Number of MC entries = %d, Promiscous = %d, All Multicast = %d.\n",
- netdev->mc_count, !!(netdev->flags & IFF_PROMISC),
+ netdev_mc_count(netdev), !!(netdev->flags & IFF_PROMISC),
!!(netdev->flags & IFF_ALLMULTI));
if (!mc_all_on) {
multicast_addr = netdev->mc_list;
} else if ((dev->flags & IFF_ALLMULTI)) {
dprintk("%s: allmulti mode\n", dev->name);
priv->rx_mode = RX_MODE_ALL_MULTI;
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
int mci;
struct dev_mc_list *mc;
dprintk("%s: set_mc_list, %d entries\n",
- dev->name, dev->mc_count);
+ dev->name, netdev_mc_count(dev));
priv->rx_mode = RX_MODE_MULTI;
priv->multi_num = 0;
for (mci = 0, mc=dev->mc_list;
- mci < dev->mc_count;
+ mci < netdev_mc_count(dev);
mc = mc->next, mci++) {
dvb_set_mc_filter(dev, mc);
}
dev->header_ops = &dvb_header_ops;
dev->netdev_ops = &dvb_netdev_ops;
dev->mtu = 4096;
- dev->mc_count = 0;
dev->flags |= IFF_NOARP;
}
/* send a "load multicast list" command to the board, max 10 addrs/cmd */
/* if num_addrs==0 the list will be cleared */
adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
- adapter->tx_pcb.length = 6 * dev->mc_count;
- for (i = 0; i < dev->mc_count; i++) {
+ adapter->tx_pcb.length = 6 * netdev_mc_count(dev);
+ for (i = 0; i < netdev_mc_count(dev); i++) {
memcpy(adapter->tx_pcb.data.multicast[i], dmi->dmi_addr, 6);
dmi = dmi->next;
}
TIMEOUT_MSG(__LINE__);
}
}
- if (dev->mc_count)
+ if (!netdev_mc_empty(dev))
adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD | RECV_MULTI;
else /* num_addrs == 0 */
adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
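For reference while reading the remaining hunks, the helpers being introduced are thin accessors over the legacy multicast fields. A minimal sketch, assuming the include/linux/netdevice.h definitions of this era (the in-tree versions are expected to be equivalent macros in that header):

    /* Sketch only: assumes the legacy struct net_device from
     * <linux/netdevice.h>, which still carries mc_count and mc_list.
     * The authoritative definitions live in that header. */
    static inline int netdev_mc_count(struct net_device *dev)
    {
            return dev->mc_count;   /* number of configured multicast addresses */
    }

    static inline bool netdev_mc_empty(struct net_device *dev)
    {
            return netdev_mc_count(dev) == 0;   /* true when no multicast addresses are set */
    }

Both forms read the same state; wrapping the field behind accessors lets the multicast list representation change later without touching every driver again.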
unsigned long flags;
struct el3_private *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
+ int mc_count = netdev_mc_count(dev);
if (el3_debug > 1) {
static int old;
- if (old != dev->mc_count) {
- old = dev->mc_count;
- pr_debug("%s: Setting Rx mode to %d addresses.\n", dev->name, dev->mc_count);
+ if (old != mc_count) {
+ old = mc_count;
+ pr_debug("%s: Setting Rx mode to %d addresses.\n",
+ dev->name, mc_count);
}
}
spin_lock_irqsave(&lp->lock, flags);
outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
ioaddr + EL3_CMD);
}
- else if (dev->mc_count || (dev->flags&IFF_ALLMULTI)) {
+ else if (mc_count || (dev->flags&IFF_ALLMULTI)) {
outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast, ioaddr + EL3_CMD);
}
else
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
struct dev_mc_list *dmi = dev->mc_list;
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct));
* Multicast setup
*/
- if (dev->mc_count) {
+ if (num_addrs) {
/* I don't understand this: do we really need memory after the init? */
int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
if (len <= 0) {
if ((dev->flags&IFF_PROMISC) ||
(dev->flags&IFF_ALLMULTI) ||
- dev->mc_count > 10)
+ netdev_mc_count(dev) > 10)
/* Enable promiscuous mode */
filt |= 1;
- else if(dev->mc_count)
+ else if (!netdev_mc_empty(dev))
{
unsigned char block[62];
unsigned char *bp;
if(!lp->mc_list_valid)
{
block[1]=0;
- block[0]=dev->mc_count;
+ block[0]=netdev_mc_count(dev);
bp=block+2;
- for(i=0;i<dev->mc_count;i++)
+ for(i=0;i<netdev_mc_count(dev);i++)
{
memcpy(bp, dmc->dmi_addr, 6);
bp+=6;
dmc=dmc->next;
}
- if(mc32_command_nowait(dev, 2, block, 2+6*dev->mc_count)==-1)
+ if(mc32_command_nowait(dev, 2, block,
+ 2+6*netdev_mc_count(dev))==-1)
{
lp->mc_reload_wait = 1;
return;
ib->filter [1] = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++){
+ for (i = 0; i < netdev_mc_count(dev); i++){
addrs = dmi->dmi_addr;
dmi = dmi->next;
int config = 0, cnt;
DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
- dev->name, dev->mc_count,
+ dev->name, netdev_mc_count(dev),
dev->flags & IFF_PROMISC ? "ON" : "OFF",
dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
i596_add_cmd(dev, &lp->cf_cmd.cmd);
}
- cnt = dev->mc_count;
+ cnt = netdev_mc_count(dev);
if (cnt > MAX_MC_CNT)
{
cnt = MAX_MC_CNT;
dev->name, cnt);
}
- if (dev->mc_count > 0) {
+ if (!netdev_mc_empty(dev)) {
struct dev_mc_list *dmi;
unsigned char *cp;
struct mc_cmd *cmd;
return;
cmd = &lp->mc_cmd;
cmd->cmd.command = CmdMulticastList;
- cmd->mc_cnt = dev->mc_count * 6;
+ cmd->mc_cnt = netdev_mc_count(dev) * 6;
cp = cmd->mc_addrs;
for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
memcpy(cp, dmi->dmi_addr, 6);
ib->filter [1] = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++){
+ for (i = 0; i < netdev_mc_count(dev); i++){
addrs = dmi->dmi_addr;
dmi = dmi->next;
* set the entire multicast list at a time and keeping track of
* it here is going to be messy.
*/
- if ((dev->mc_count) && !(ap->mcast_all)) {
+ if (!netdev_mc_empty(dev) && !ap->mcast_all) {
cmd.evt = C_SET_MULTICAST_MODE;
cmd.code = C_C_MCAST_ENABLE;
cmd.idx = 0;
}
else
writel( PROM, lp->mmio + CMD2);
- if(dev->flags & IFF_ALLMULTI || dev->mc_count > MAX_FILTER_SIZE){
+ if (dev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(dev) > MAX_FILTER_SIZE) {
/* get all multicast packet */
mc_filter[1] = mc_filter[0] = 0xffffffff;
lp->mc_list = dev->mc_list;
amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
return;
}
- if( dev->mc_count == 0 ){
+ if (netdev_mc_empty(dev)) {
/* get only own packets */
mc_filter[1] = mc_filter[0] = 0;
lp->mc_list = NULL;
lp->options |= OPTION_MULTICAST_ENABLE;
lp->mc_list = dev->mc_list;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count;
+ for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < netdev_mc_count(dev);
i++, mc_ptr = mc_ptr->next) {
bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f;
mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
lance->RDP = PROM; /* Set promiscuous mode */
} else {
short multicast_table[4];
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
int i;
/* We don't use the multicast table, but rely on upper-layer filtering. */
memset(multicast_table, (num_addrs == 0) ? 0 : -1,
mc_filter[0] = mc_filter[1] = 0;
curr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, curr = curr->next) {
+ for (i = 0; i < netdev_mc_count(dev); i++, curr = curr->next) {
if (!curr) break; /* unexpected end of list */
bitnr = hash_get_index(curr->dmi_addr);
at91_emac_write(AT91_EMAC_HSH, -1);
at91_emac_write(AT91_EMAC_HSL, -1);
cfg |= AT91_EMAC_MTI;
- } else if (dev->mc_count > 0) { /* Enable specific multicasts */
+ } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */
at91ether_sethashtable(dev);
cfg |= AT91_EMAC_MTI;
} else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */
if (dev->flags & IFF_PROMISC) {
/* promiscuous mode */
priv(dev)->regs.config1 |= CFG1_RECVPROMISC;
- } else if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+ } else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI;
} else
priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD;
struct port *port = netdev_priv(dev);
struct dev_mc_list *mclist = dev->mc_list;
u8 diffs[ETH_ALEN], *addr;
- int cnt = dev->mc_count, i;
+ int cnt = netdev_mc_count(dev), i;
if ((dev->flags & IFF_PROMISC) || !mclist || !cnt) {
__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
if (ndev->flags & IFF_ALLMULTI) {
/* enable all multicast mode */
ctrl |= DRXC_RM;
- } else if (ndev->mc_count > KS8695_NR_ADDRESSES) {
+ } else if (netdev_mc_count(ndev) > KS8695_NR_ADDRESSES) {
/* more specific multicast addresses than can be
* handled in hardware
*/
/* enable specific multicasts */
ctrl &= ~DRXC_RM;
ks8695_init_partial_multicast(ksp, ndev->mc_list,
- ndev->mc_count);
+ netdev_mc_count(ndev));
}
ks8695_writereg(ksp, KS8695_DRXC, ctrl);
if (dev->flags & IFF_PROMISC) {
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
- } else if (dev->mc_count > MC_FILTERBREAK ||
+ } else if (netdev_mc_count(dev) > MC_FILTERBREAK ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(2, ioaddr + RX_MODE); /* Use normal mode. */
- } else if (dev->mc_count == 0) {
+ } else if (netdev_mc_empty(dev)) {
memset(mc_filter, 0x00, sizeof(mc_filter));
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
unsigned int bit =
ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
} else {
short multicast_table[4];
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
int i;
/* We don't use the multicast table, but rely on upper-layer
* filtering. */
struct net_local *lp = netdev_priv(dev);
long ioaddr = dev->base_addr;
- if (dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
+ if (!netdev_mc_empty(dev) || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
lp->addr_mode = CMR2h_PROMISC;
else
lp->addr_mode = CMR2h_Normal;
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
new_mode = CMR2h_PROMISC;
- } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 1000) ||
+ (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
new_mode = CMR2h_Normal;
struct dev_mc_list *mclist;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next)
{
int filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
aup->mac->control |= MAC_PROMISCUOUS;
} else if ((dev->flags & IFF_ALLMULTI) ||
- dev->mc_count > MULTICAST_FILTER_LIMIT) {
+ netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
aup->mac->control |= MAC_PASS_ALL_MULTI;
aup->mac->control &= ~MAC_PROMISCUOUS;
printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
u32 mc_filter[2]; /* Multicast hash filter */
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
(long *)mc_filter);
struct dev_mc_list *mclist;
int i, num_ents;
- num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
+ num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
mclist = dev->mc_list;
for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
__b44_cam_write(bp, mclist->dmi_addr, i + 1);
__b44_set_mac_addr(bp);
if ((dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > B44_MCAST_TABLE_SIZE))
+ (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
val |= RXCONFIG_ALLMULTI;
else
i = __b44_load_mcast(bp, dev);
/* only 3 perfect match registers left, first one is used for
* own mac address */
- if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 3)
+ if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
val |= ENET_RXCFG_ALLMCAST_MASK;
else
val &= ~ENET_RXCFG_ALLMCAST_MASK;
}
for (i = 0, mc_list = dev->mc_list;
- (mc_list != NULL) && (i < dev->mc_count) && (i < 3);
+ (mc_list != NULL) && (i < netdev_mc_count(dev)) && (i < 3);
i++, mc_list = mc_list->next) {
u8 *dmi_addr;
u32 tmp;
}
/* Enable multicast promisc if num configured exceeds what we support */
- if (netdev->flags & IFF_ALLMULTI || netdev->mc_count > BE_MAX_MC) {
+ if (netdev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(netdev) > BE_MAX_MC) {
be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0,
&adapter->mc_cmd_mem);
goto done;
}
be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
- netdev->mc_count, &adapter->mc_cmd_mem);
+ netdev_mc_count(netdev), &adapter->mc_cmd_mem);
done:
return;
}
emac_hashhi = emac_hashlo = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= PAM;
bfin_write_EMAC_OPMODE(sysctl);
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
/* set up multicast hash table */
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= HM;
{
struct dev_mc_list *dmi;
struct bmac_data *bp = netdev_priv(dev);
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
unsigned short rx_cfg;
int i;
XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
- if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
bmac_update_hash_table_mask(dev, bp);
rx_cfg = bmac_rx_on(dev, 1, 0);
unsigned short rx_cfg;
u32 crc;
- if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
bmwrite(dev, BHASH0, 0xffff);
bmwrite(dev, BHASH1, 0xffff);
bmwrite(dev, BHASH2, 0xffff);
for(i = 0; i < 4; i++) hash_table[i] = 0;
- for(i = 0; i < dev->mc_count; i++) {
+ for(i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
rx_mode = BNX2X_RX_MODE_PROMISC;
else if ((dev->flags & IFF_ALLMULTI) ||
- ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
+ ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
+ CHIP_IS_E1(bp)))
rx_mode = BNX2X_RX_MODE_ALLMULTI;
else { /* some multicasts */
bnx2x_sp(bp, mcast_config);
for (i = 0, mclist = dev->mc_list;
- mclist && (i < dev->mc_count);
+ mclist && (i < netdev_mc_count(dev));
i++, mclist = mclist->next) {
config->config_table[i].
memset(mc_filter, 0, 4 * MC_HASH_SIZE);
for (i = 0, mclist = dev->mc_list;
- mclist && (i < dev->mc_count);
+ mclist && (i < netdev_mc_count(dev));
i++, mclist = mclist->next) {
DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
#define t1_rx_mode_promisc(rm) (rm->dev->flags & IFF_PROMISC)
#define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI)
-#define t1_rx_mode_mc_cnt(rm) (rm->dev->mc_count)
+#define t1_rx_mode_mc_cnt(rm) (netdev_mc_count(rm->dev))
static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm)
{
u8 *addr = NULL;
- if (rm->idx++ < rm->dev->mc_count) {
+ if (rm->idx++ < t1_rx_mode_mc_cnt(rm)) {
addr = rm->list->dmi_addr;
rm->list = rm->list->next;
}
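The t1_rx_mode macros and t1_get_next_mcaddr() above expose the multicast list as a small cursor. A hedged usage sketch (the caller name and the filter-programming step are illustrative, not taken from the driver):

    /* Walk every multicast address through the cursor shown above;
     * t1_get_next_mcaddr() yields NULL once rm->idx passes the count. */
    static void example_walk_mcaddrs(struct t1_rx_mode *rm)
    {
            u8 *mcaddr;

            while ((mcaddr = t1_get_next_mcaddr(rm)) != NULL) {
                    /* program mcaddr into the MAC's exact-match or hash filter */
                    pr_debug("mc addr %pM\n", mcaddr);
            }
    }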
* cpmac uses some strange mac address hashing
* (not crc32)
*/
- for (i = 0, iter = dev->mc_list; i < dev->mc_count;
+ for (i = 0, iter = dev->mc_list; i < netdev_mc_count(dev);
i++, iter = iter->next) {
bit = 0;
tmp = iter->dmi_addr[0];
set_multicast_list(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
- int num_addr = dev->mc_count;
+ int num_addr = netdev_mc_count(dev);
unsigned long int lo_bits;
unsigned long int hi_bits;
} else {
mbp_enable = (mbp_enable & ~EMAC_MBP_RXPROMISC);
if ((ndev->flags & IFF_ALLMULTI) ||
- (ndev->mc_count > EMAC_DEF_MAX_MULTICAST_ADDRESSES)) {
+ netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
}
- if (ndev->mc_count > 0) {
+ if (!netdev_mc_empty(ndev)) {
struct dev_mc_list *mc_ptr;
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
static void de620_set_multicast_list(struct net_device *dev)
{
- if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
{ /* Enable promiscuous mode */
de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
}
*lib_ptr(ib, filter[3], lp->type) = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
* perfect filtering will be used.
*/
- if (dev->mc_count > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
+ if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
{
bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
bp->mc_count = 0; /* Don't add mc addrs to CAM */
else
{
bp->group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC group prom mode */
- bp->mc_count = dev->mc_count; /* Add mc addrs to CAM */
+ bp->mc_count = netdev_mc_count(dev); /* Add mc addrs to CAM */
}
/* Copy addresses to multicast address table, then update adapter CAM */
lp->init_block.mcast_table[i] = 0;
}
/* Add multicast addresses */
- for (i = 0; i < dev->mc_count; i++) { /* for each address in the list */
+ for (i = 0; i < netdev_mc_count(dev); i++) { /* for each address in the list */
addrs = dmi->dmi_addr;
dmi = dmi->next;
if ((*addrs & 0x01) == 1) { /* multicast address? */
/* Receive all frames promiscuously. */
rx_mode = ReceiveAllFrames;
} else if ((dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > multicast_filter_limit)) {
+ (netdev_mc_count(dev) > multicast_filter_limit)) {
/* Receive broadcast and multicast frames */
rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
int i;
struct dev_mc_list *mclist;
/* Receive broadcast frames and multicast frames filtering
by Hashtable */
rx_mode =
ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
- for (i=0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i=0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist=mclist->next)
{
int bit, index = 0;
{
board_info_t *db = netdev_priv(dev);
struct dev_mc_list *mcptr = dev->mc_list;
- int mc_cnt = dev->mc_count;
+ int mc_cnt = netdev_mc_count(dev);
int i, oft;
u32 hash_val;
u16 hash_table[4];
{
struct net_device *netdev = nic->netdev;
struct dev_mc_list *list = netdev->mc_list;
- u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
+ u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
cb->command = cpu_to_le16(cb_multi);
cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
struct nic *nic = netdev_priv(netdev);
DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
- netdev->mc_count, netdev->flags);
+ netdev_mc_count(netdev), netdev->flags);
if (netdev->flags & IFF_PROMISC)
nic->flags |= promiscuous;
nic->flags &= ~promiscuous;
if (netdev->flags & IFF_ALLMULTI ||
- netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
+ netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
nic->flags |= multicast_all;
else
nic->flags &= ~multicast_all;
short ioaddr = dev->base_addr;
unsigned short mode;
struct dev_mc_list *dmi=dev->mc_list;
+ int mc_count = netdev_mc_count(dev);
- if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63)
+ if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63)
{
eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
mode = inb(ioaddr + REG2);
eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
}
- else if (dev->mc_count==0 )
+ else if (mc_count == 0)
{
eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
mode = inb(ioaddr + REG2);
outw(MC_SETUP, ioaddr + IO_PORT);
outw(0, ioaddr + IO_PORT);
outw(0, ioaddr + IO_PORT);
- outw(6*(dev->mc_count + 1), ioaddr + IO_PORT);
+ outw(6 * (mc_count + 1), ioaddr + IO_PORT);
- for (i = 0; i < dev->mc_count; i++)
+ for (i = 0; i < mc_count; i++)
{
eaddrs=(unsigned short *)dmi->dmi_addr;
dmi=dmi->next;
outb(MC_SETUP, ioaddr);
/* Update the transmit queue */
- i = lp->tx_end + XMT_HEADER + 6*(dev->mc_count + 1);
+ i = lp->tx_end + XMT_HEADER + 6 * (mc_count + 1);
if (lp->tx_start != lp->tx_end)
{
break;
} else if ((i & 0x0f) == 0x03) { /* MC-Done */
printk(KERN_DEBUG "%s: set Rx mode to %d address%s.\n",
- dev->name, dev->mc_count,
- dev->mc_count > 1 ? "es":"");
+ dev->name, mc_count,
+ mc_count > 1 ? "es":"");
break;
}
}
{
struct dev_mc_list *dmi;
unsigned short ioaddr = dev->base_addr;
- int count = dev->mc_count;
+ int count = netdev_mc_count(dev);
int i;
if (count > 8) {
printk(KERN_INFO "%s: too many multicast addresses (%d)\n",
}
if (!(dev->flags & IFF_PROMISC)) {
eexp_setup_filter(dev);
- if (lp->old_mc_count != dev->mc_count) {
+ if (lp->old_mc_count != netdev_mc_count(dev)) {
kick = 1;
- lp->old_mc_count = dev->mc_count;
+ lp->old_mc_count = netdev_mc_count(dev);
}
}
if (kick) {
}
ehea_allmulti(dev, 0);
- if (dev->mc_count) {
+ if (!netdev_mc_empty(dev)) {
ret = ehea_drop_multicast_list(dev);
if (ret) {
/* Dropping the current multicast list failed.
ehea_allmulti(dev, 1);
}
- if (dev->mc_count > port->adapter->max_mc_mac) {
+ if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
ehea_info("Mcast registration limit reached (0x%llx). "
"Use ALLMULTI!",
port->adapter->max_mc_mac);
goto out;
}
- for (i = 0, k_mcl_entry = dev->mc_list; i < dev->mc_count; i++,
+ for (i = 0, k_mcl_entry = dev->mc_list; i < netdev_mc_count(dev); i++,
k_mcl_entry = k_mcl_entry->next)
ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
if (netif_msg_link(priv))
dev_info(&dev->dev, "promiscuous mode\n");
priv->rxfilter = RXFILTER_PROMISC;
- } else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count) {
+ } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
if (netif_msg_link(priv))
dev_info(&dev->dev, "%smulticast mode\n",
(dev->flags & IFF_ALLMULTI) ? "all-" : "");
int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
+ unsigned int mc_count = netdev_mc_count(netdev);
int allmulti = (netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS);
+ mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
- unsigned int mc_count = netdev->mc_count;
unsigned int i, j;
if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
outl(0x002C, ioaddr + RxCtrl);
/* Unconditionally log net taps. */
memset(mc_filter, 0xff, sizeof(mc_filter));
- } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
/* There is apparently a chip bug, so the multicast filter
is never enabled. */
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
outl(0x000C, ioaddr + RxCtrl);
- } else if (dev->mc_count == 0) {
+ } else if (netdev_mc_empty(dev)) {
outl(0x0004, ioaddr + RxCtrl);
return;
} else { /* Never executed, for now. */
struct dev_mc_list *mclist;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
unsigned int bit_nr =
ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
{
int ioaddr = dev->base_addr;
- if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
{
outb(3, ioaddr + RECEIVE_MODE_REG);
} else {
}
/* Update table */
- for (i = 0; i < dev->mc_count; i++) { /* for each address in the list */
+ for (i = 0; i < netdev_mc_count(dev); i++) { /* for each address in the list */
addrs = dmi->dmi_addr;
dmi = dmi->next;
if ((*addrs & 0x01) == 1) { /* multicast address? */
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
unsigned int bit;
bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
dmi = dev->mc_list;
- for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) {
+ for (j = 0; j < netdev_mc_count(dev); j++, dmi = dmi->next) {
/* Only support group multicast for now */
if (!(dmi->dmi_addr[0] & 1))
continue;
u32 gaddr2 = 0x00000000;
dmi = dev->mc_list;
- for (i=0; i<dev->mc_count; i++) {
+ for (i=0; i<netdev_mc_count(dev); i++) {
crc = ether_crc_le(6, dmi->dmi_addr) >> 26;
if (crc >= 32)
gaddr1 |= 1 << (crc-32);
/* if all multi or too many multicasts; just enable all */
if ((dev->flags & IFF_ALLMULTI) != 0 ||
- dev->mc_count > FCC_MAX_MULTICAST_ADDRS) {
+ netdev_mc_count(dev) > FCC_MAX_MULTICAST_ADDRS) {
W32(ep, fen_gaddrh, 0xffffffff);
W32(ep, fen_gaddrl, 0xffffffff);
/* if all multi or too many multicasts; just enable all */
if ((dev->flags & IFF_ALLMULTI) != 0 ||
- dev->mc_count > FEC_MAX_MULTICAST_ADDRS) {
+ netdev_mc_count(dev) > FEC_MAX_MULTICAST_ADDRS) {
fep->fec.hthi = 0xffffffffU;
fep->fec.htlo = 0xffffffffU;
}
/* if all multi or too many multicasts; just enable all */
if ((dev->flags & IFF_ALLMULTI) != 0 ||
- dev->mc_count > SCC_MAX_MULTICAST_ADDRS) {
+ netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) {
W16(ep, sen_gaddr1, 0xffff);
W16(ep, sen_gaddr2, 0xffff);
em_num = 0;
}
- if (dev->mc_count == 0)
+ if (netdev_mc_empty(dev))
return;
/* Parse the list, and set the appropriate bits */
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
writew(0x000F, ioaddr + AddrMode);
- } else if ((dev->mc_count > 63) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 63) || (dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
writew(0x000B, ioaddr + AddrMode);
- } else if (dev->mc_count > 0) { /* Must use the CAM filter. */
+ } else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */
struct dev_mc_list *mclist;
int i;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
writel(*(u32*)(mclist->dmi_addr), ioaddr + 0x100 + i*8);
writel(0x20000 | (*(u16*)&mclist->dmi_addr[4]),
lp->mac2_mode = HP100_MAC2MODE6; /* promiscuous mode = get all good */
lp->mac1_mode = HP100_MAC1MODE6; /* packets on the net */
memset(&lp->hash_bytes, 0xff, 8);
- } else if (dev->mc_count || (dev->flags & IFF_ALLMULTI)) {
+ } else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) {
lp->mac2_mode = HP100_MAC2MODE5; /* multicast mode = get packets for */
lp->mac1_mode = HP100_MAC1MODE5; /* me, broadcasts and all multicasts */
#ifdef HP100_MULTICAST_FILTER /* doesn't work!!! */
memset(&lp->hash_bytes, 0x00, 8);
#ifdef HP100_DEBUG
- printk("hp100: %s: computing hash filter - mc_count = %i\n", dev->name, dev->mc_count);
+ printk("hp100: %s: computing hash filter - mc_count = %i\n",
+ dev->name, netdev_mc_count(dev));
#endif
- for (i = 0, dmi = dev->mc_list; i < dev->mc_count; i++, dmi = dmi->next) {
+ for (i = 0, dmi = dev->mc_list; i < netdev_mc_count(dev); i++, dmi = dmi->next) {
addrs = dmi->dmi_addr;
if ((*addrs & 0x01) == 0x01) { /* multicast address? */
#ifdef HP100_DEBUG
struct dev_mc_list *dmi;
int i;
- DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
+ DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
memset(gaht_temp, 0, sizeof (gaht_temp));
if (ndev->flags & IFF_PROMISC)
r |= EMAC_RMR_PME;
else if (ndev->flags & IFF_ALLMULTI ||
- (ndev->mc_count > EMAC_XAHT_SLOTS(dev)))
+ (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
r |= EMAC_RMR_PMME;
- else if (ndev->mc_count > 0)
+ else if (!netdev_mc_empty(ndev))
r |= EMAC_RMR_MAE;
return r;
struct ibmveth_adapter *adapter = netdev_priv(netdev);
unsigned long lpar_rc;
- if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastEnableRecv |
IbmVethMcastDisableFiltering,
ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
}
/* add the addresses to the filter table */
- for(i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
+ for(i = 0; i < netdev_mc_count(netdev); ++i, mclist = mclist->next) {
// add the multicast address to the filter table
unsigned long mcast_addr = 0;
memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
u32 vmolr = 0;
int i;
- if (!netdev->mc_count) {
+ if (netdev_mc_empty(netdev)) {
/* nothing to program, so clear mc list */
igb_update_mc_addr_list(hw, NULL, 0);
igb_restore_vf_multicasts(adapter);
return 0;
}
- mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
+ mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
if (!mta_list)
return -ENOMEM;
/* The shared function expects a packed array of only addresses. */
mc_ptr = netdev->mc_list;
- for (i = 0; i < netdev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(netdev); i++) {
if (!mc_ptr)
break;
memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
igb_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
- return netdev->mc_count;
+ return netdev_mc_count(netdev);
}
/**
u8 *mta_list = NULL;
int i;
- if (netdev->mc_count) {
- mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC);
+ if (!netdev_mc_empty(netdev)) {
+ mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
if (!mta_list) {
dev_err(&adapter->pdev->dev,
"failed to allocate multicast filter list\n");
/* prepare a packed array of only addresses. */
mc_ptr = netdev->mc_list;
- for (i = 0; i < netdev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(netdev); i++) {
if (!mc_ptr)
break;
memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
ioc3_w_emcr(ip->emcr); /* Clear promiscuous. */
(void) ioc3_r_emcr();
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > 64)) {
/* Too many for hashing to make sense or we want all
multicast packets anyway, so skip computing all the
hashes and just accept all packets. */
ip->ehar_h = 0xffffffff;
ip->ehar_l = 0xffffffff;
} else {
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
char *addr = dmi->dmi_addr;
dmi = dmi->next;
receivemode = IPG_RM_RECEIVEALLFRAMES;
} else if ((dev->flags & IFF_ALLMULTI) ||
((dev->flags & IFF_MULTICAST) &&
- (dev->mc_count > IPG_MULTICAST_HASHTABLE_SIZE))) {
+ (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) {
/* NIC to be configured to receive all multicast
* frames. */
receivemode |= IPG_RM_RECEIVEMULTICAST;
- } else if ((dev->flags & IFF_MULTICAST) && (dev->mc_count > 0)) {
+ } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
/* NIC to be configured to receive selected
* multicast addresses. */
receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
/* Enable promiscuous mode */
outw(MULTICAST|PROMISC, ioaddr);
}
- else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS)
+ else if ((dev->flags&IFF_ALLMULTI) ||
+ netdev_mc_count(dev) > HW_MAX_ADDRS)
{
/* Disable promiscuous mode, use normal mode. */
hardware_set_filter(NULL);
outw(MULTICAST, ioaddr);
}
- else if(dev->mc_count)
+ else if (!netdev_mc_empty(dev))
{
/* Walk the address list, and load the filter */
hardware_set_filter(dev->mc_list);
write_lock_irqsave(&port->mcast_gate, flags);
if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > VETH_MAX_MCAST)) {
+ (netdev_mc_count(dev) > VETH_MAX_MCAST)) {
port->promiscuous = 1;
} else {
struct dev_mc_list *dmi = dev->mc_list;
/* Update table */
port->num_mcast = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
u8 *addr = dmi->dmi_addr;
u64 xaddr = 0;
rctl |= IXGB_RCTL_VFE;
}
- if (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
+ if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
rctl |= IXGB_RCTL_MPE;
IXGB_WRITE_REG(hw, RCTL, rctl);
} else {
memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
- ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
+ ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
}
}
return;
if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
+ (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
hw->mac.ops.update_uc_addr_list(hw, netdev);
/* reprogram multicast list */
- addr_count = netdev->mc_count;
+ addr_count = netdev_mc_count(netdev);
if (addr_count)
addr_list = netdev->mc_list->dmi_addr;
hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
int addr_count = 0;
/* reprogram multicast list */
- addr_count = netdev->mc_count;
+ addr_count = netdev_mc_count(netdev);
if (addr_count)
addr_list = netdev->mc_list->dmi_addr;
if (hw->mac.ops.update_mc_addr_list)
jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
for (i = 0, mclist = netdev->mc_list;
- mclist && i < netdev->mc_count;
+ mclist && i < netdev_mc_count(netdev);
++i, mclist = mclist->next) {
bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
if (dev->flags & IFF_PROMISC)
recognise |= ETH_ARC_PRO;
- else if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 4))
+ else if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 4))
/* All multicast and broadcast */
recognise |= ETH_ARC_AM;
/* Build the hash table */
- if (dev->mc_count > 4) {
+ if (netdev_mc_count(dev) > 4) {
u16 hash_table[4];
u32 crc;
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
char *addrs = dmi->dmi_addr;
dmi = dmi->next;
rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE |
RXCR1_RXPAFMA | RXCR1_RXMAFMA);
- } else if (dev->flags & IFF_MULTICAST && dev->mc_count > 0) {
+ } else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) {
struct dev_mc_list *mcptr = dev->mc_list;
u32 crc;
int i;
/* accept some multicast */
- for (i = dev->mc_count; i > 0; i--) {
+ for (i = netdev_mc_count(dev); i > 0; i--) {
crc = ether_crc(ETH_ALEN, mcptr->dmi_addr);
crc >>= (32 - 6); /* get top six bits */
else
ks_set_promis(ks, false);
- if ((netdev->flags & IFF_MULTICAST) && netdev->mc_count) {
- if (netdev->mc_count <= MAX_MCAST_LST) {
+ if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
+ if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
int i = 0;
for (ptr = netdev->mc_list; ptr; ptr = ptr->next) {
if (!(*ptr->dmi_addr & 1))
} else {
short multicast_table[4];
int i;
- int num_addrs=dev->mc_count;
+ int num_addrs=netdev_mc_count(dev);
if(dev->flags&IFF_ALLMULTI)
num_addrs=1;
/* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
}
}
- cnt = dev->mc_count;
+ cnt = netdev_mc_count(dev);
if (cnt > MAX_MC_CNT) {
cnt = MAX_MC_CNT;
printk(KERN_NOTICE "%s: Only %d multicast addresses supported",
dev->name, cnt);
}
- if (dev->mc_count > 0) {
+ if (!netdev_mc_empty(dev)) {
struct dev_mc_list *dmi;
unsigned char *cp;
struct mc_cmd *cmd;
cmd = &dma->mc_cmd;
cmd->cmd.command = SWAP16(CmdMulticastList);
- cmd->mc_cnt = SWAP16(dev->mc_count * 6);
+ cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);
cp = cmd->mc_addrs;
for (dmi = dev->mc_list;
cnt && dmi != NULL;
mutex_lock(&lp->indirect_mutex);
if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
- ndev->mc_count > MULTICAST_CAM_TABLE_NUM) {
+ netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
/*
* We must make the kernel realise we had to move
* into promisc mode or we start all out war on
ndev->flags |= IFF_PROMISC;
temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
- } else if (ndev->mc_count) {
+ } else if (!netdev_mc_empty(ndev)) {
struct dev_mc_list *mclist = ndev->mc_list;
- for (i = 0; mclist && i < ndev->mc_count; i++) {
+ for (i = 0; mclist && i < netdev_mc_count(ndev); i++) {
if (i >= MULTICAST_CAM_TABLE_NUM)
break;
if (i596_debug > 1)
printk ("%s: set multicast list %d\n",
- dev->name, dev->mc_count);
+ dev->name, netdev_mc_count(dev));
- if (dev->mc_count > 0) {
+ if (!netdev_mc_empty(dev)) {
struct dev_mc_list *dmi;
char *cp;
- cmd = kmalloc(sizeof(struct i596_cmd)+2+dev->mc_count*6, GFP_ATOMIC);
+ cmd = kmalloc(sizeof(struct i596_cmd) + 2 +
+ netdev_mc_count(dev) * 6, GFP_ATOMIC);
if (cmd == NULL) {
printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name);
return;
}
cmd->command = CmdMulticastList;
- *((unsigned short *) (cmd + 1)) = dev->mc_count * 6;
+ *((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6;
cp = ((char *)(cmd + 1))+2;
for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
memcpy(cp, dmi,6);
if (lp->set_conf.pa_next != I596_NULL) {
return;
}
- if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
+ if (netdev_mc_empty(dev) &&
+ !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
lp->i596_config[8] &= ~0x01;
} else {
lp->i596_config[8] |= 0x01;
mc_filter[0] = mc_filter[1] = 0;
curr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, curr = curr->next) {
+ for (i = 0; i < netdev_mc_count(dev); i++, curr = curr->next) {
if (!curr) break; /* unexpected end of list */
bitnr = hash_get_index(curr->dmi_addr);
macb_writel(bp, HRB, -1);
macb_writel(bp, HRT, -1);
cfg |= MACB_BIT(NCFGR_MTI);
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
/* Enable specific multicasts */
macb_sethashtable(dev);
cfg |= MACB_BIT(NCFGR_MTI);
} else {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
crc = ether_crc_le(6, dmi->dmi_addr);
j = crc >> 26; /* bit number in multicast_filter */
multicast_filter[j >> 3] |= 1 << (j & 7);
} else {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
crc = ether_crc_le(6, dmi->dmi_addr);
j = crc >> 26; /* bit number in multicast_filter */
multicast_filter[j >> 3] |= 1 << (j & 7);
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
rx_mode = RxFilterEnable | AcceptBroadcast
| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
rx_mode = RxFilterEnable | AcceptBroadcast
| AcceptAllMulticast | AcceptMyPhys;
struct dev_mc_list *mclist;
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int b = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff;
mc_filter[b/8] |= (1 << (b & 0x07));
return;
}
- if (netdev->mc_count == 0) {
+ if (netdev_mc_empty(netdev)) {
adapter->set_promisc(adapter,
NETXEN_NIU_NON_PROMISC_MODE);
netxen_nic_disable_mcast_filter(adapter);
adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE);
if (netdev->flags & IFF_ALLMULTI ||
- netdev->mc_count > adapter->max_mc_count) {
+ netdev_mc_count(netdev) > adapter->max_mc_count) {
netxen_nic_disable_mcast_filter(adapter);
return;
}
for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next, index++)
netxen_nic_set_mcast_addr(adapter, index, mc_ptr->dmi_addr);
- if (index != netdev->mc_count)
+ if (index != netdev_mc_count(netdev))
printk(KERN_WARNING "%s: %s multicast address count mismatch\n",
netxen_nic_driver_name, netdev->name);
}
if ((netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > adapter->max_mc_count)) {
+ (netdev_mc_count(netdev) > adapter->max_mc_count)) {
mode = VPORT_MISS_MODE_ACCEPT_MULTI;
goto send_fw_cmd;
}
- if (netdev->mc_count > 0) {
+ if (!netdev_mc_empty(netdev)) {
for (mc_ptr = netdev->mc_list; mc_ptr;
mc_ptr = mc_ptr->next) {
nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, &del_list);
struct tdr_cmd_struct __iomem *tdr_cmd;
struct mcsetup_cmd_struct __iomem *mc_cmd;
struct dev_mc_list *dmi = dev->mc_list;
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
ptr = p->scb + 1;
if(dev->flags & IFF_PROMISC)
ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
- else if(dev->mc_count || dev->flags & IFF_ALLMULTI)
+ else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI)
ni65_init_lance(p,dev->dev_addr,0xff,0x0);
else
ni65_init_lance(p,dev->dev_addr,0x00,0x00);
np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
if (dev->flags & IFF_PROMISC)
np->flags |= NIU_FLAGS_PROMISC;
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
+ if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
np->flags |= NIU_FLAGS_MCAST;
alt_cnt = netdev_uc_count(dev);
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < 16; i++)
hash[i] = 0xffff;
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
for (addr = dev->mc_list; addr; addr = addr->next) {
u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);
else
and_mask &= ~(RFCR_AAU | RFCR_AAM);
- if (ndev->flags & IFF_ALLMULTI || ndev->mc_count)
+ if (ndev->flags & IFF_ALLMULTI || netdev_mc_count(ndev))
or_mask |= RFCR_AAM;
else
and_mask &= ~RFCR_AAM;
}
if (netdev->flags & IFF_MULTICAST) {
- if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI)
- || netdev->mc_count > available_cam_entries)
+ if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(netdev) > available_cam_entries)
multicast_mode = 2; /* 1 - Accept all multicast. */
else
multicast_mode = 0; /* 0 - Use CAM. */
}
}
if (multicast_mode == 0) {
- i = netdev->mc_count;
+ i = netdev_mc_count(netdev);
list = netdev->mc_list;
while (i--) {
octeon_mgmt_cam_state_add(&cam_state, list->da_addr);
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
struct dev_mc_list *mclist;
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
if (dev->flags & IFF_PROMISC)
outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
ioaddr + EL3_CMD);
- else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
+ else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
else
outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
if (dev->flags & IFF_PROMISC)
opts |= RxMulticast | RxProm;
- else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
+ else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
opts |= RxMulticast;
outw(opts, ioaddr + EL3_CMD);
}
if (dev->flags & IFF_PROMISC) {
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
- } else if (dev->mc_count > MC_FILTERBREAK ||
+ } else if (netdev_mc_count(dev) > MC_FILTERBREAK ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(2, ioaddr + RX_MODE); /* Use normal mode. */
- } else if (dev->mc_count == 0) {
+ } else if (netdev_mc_empty(dev)) {
memset(mc_filter, 0x00, sizeof(mc_filter));
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
struct dev_mc_list *mclist;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
unsigned int bit =
ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
#ifdef PCMCIA_DEBUG
{
static int old;
- if (dev->mc_count != old) {
- old = dev->mc_count;
+ if (netdev_mc_count(dev) != old) {
+ old = netdev_mc_count(dev);
pr_debug("%s: setting Rx mode to %d addresses.\n",
dev->name, old);
}
#endif
/* Set multicast_num_addrs. */
- lp->multicast_num_addrs = dev->mc_count;
+ lp->multicast_num_addrs = netdev_mc_count(dev);
/* Set multicast_ladrf. */
if (num_addrs > 0) {
/* Calculate multicast logical address filter */
memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
memcpy(adr, dmi->dmi_addr, ETHER_ADDR_LEN);
dmi = dmi->next;
BuildLAF(lp->multicast_ladrf, adr);
#ifdef PCMCIA_DEBUG
{
static int old;
- if (dev->mc_count != old) {
- old = dev->mc_count;
+ if (netdev_mc_count(dev) != old) {
+ old = netdev_mc_count(dev);
pr_debug("%s: setting Rx mode to %d addresses.\n",
dev->name, old);
}
}
#endif
- lp->multicast_num_addrs = dev->mc_count;
+ lp->multicast_num_addrs = netdev_mc_count(dev);
restore_multicast_list(dev);
} /* set_multicast_list */
} else if (dev->flags & IFF_ALLMULTI)
rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
else {
- if (dev->mc_count) {
- fill_multicast_tbl(dev->mc_count, dev->mc_list,
+ if (!netdev_mc_empty(dev)) {
+ fill_multicast_tbl(netdev_mc_count(dev), dev->mc_list,
(u_char *)multicast_table);
}
rx_cfg_setting = RxStripCRC | RxEnable;
if (++n > 9)
break;
i = 0;
- if (n > 1 && n <= dev->mc_count && dmi) {
+ if (n > 1 && n <= netdev_mc_count(dev) && dmi) {
dmi = dmi->next;
}
}
SelectPage(k);
}
- if (n && n <= dev->mc_count && dmi)
+ if (n && n <= netdev_mc_count(dev) && dmi)
addr = dmi->dmi_addr;
else
addr = dev->dev_addr;
if (dev->flags & IFF_PROMISC) { /* snoop */
PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */
- } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) {
+ } else if (netdev_mc_count(dev) > 9 || (dev->flags & IFF_ALLMULTI)) {
PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
/* the chip can filter 9 addresses perfectly */
PutByte(XIRCREG42_SWC1, value | 0x01);
SelectPage(0x40);
ib->filter[1] = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
status);
if ((netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > GELIC_NET_MC_COUNT_MAX)) {
+ (netdev_mc_count(netdev) > GELIC_NET_MC_COUNT_MAX)) {
status = lv1_net_add_multicast_address(bus_id(card),
dev_id(card),
0, 1);
}
if ((netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > adapter->max_mc_count)) {
+ (netdev_mc_count(netdev) > adapter->max_mc_count)) {
mode = VPORT_MISS_MODE_ACCEPT_MULTI;
goto send_fw_cmd;
}
- if (netdev->mc_count > 0) {
+ if (!netdev_mc_empty(netdev)) {
for (mc_ptr = netdev->mc_list; mc_ptr;
mc_ptr = mc_ptr->next) {
qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr,
* transition is taking place.
*/
if ((ndev->flags & IFF_ALLMULTI) ||
- (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
+ (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
}
}
- if (ndev->mc_count) {
+ if (!netdev_mc_empty(ndev)) {
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
goto exit;
}
/* Too many multicast addresses
* accept all traffic */
- else if ((dev->mc_count > MCAST_MAX) || (dev->flags & IFF_ALLMULTI))
+ else if ((netdev_mc_count(dev) > MCAST_MAX) ||
+ (dev->flags & IFF_ALLMULTI))
reg |= 0x0020;
iowrite16(reg, ioaddr);
spin_unlock_irqrestore(&lp->lock, flags);
/* Build the hash table */
- if (dev->mc_count > MCAST_MAX) {
+ if (netdev_mc_count(dev) > MCAST_MAX) {
u16 hash_table[4];
u32 crc;
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
char *addrs = dmi->dmi_addr;
dmi = dmi->next;
iowrite16(hash_table[3], ioaddr + MAR3);
}
/* Multicast Address 1~4 case */
- for (i = 0, dmi; (i < dev->mc_count) && (i < MCAST_MAX); i++) {
+ for (i = 0, dmi; (i < netdev_mc_count(dev)) && (i < MCAST_MAX); i++) {
adrp = (u16 *)dmi->dmi_addr;
iowrite16(adrp[0], ioaddr + MID_1L + 8*i);
iowrite16(adrp[1], ioaddr + MID_1M + 8*i);
iowrite16(adrp[2], ioaddr + MID_1H + 8*i);
dmi = dmi->next;
}
- for (i = dev->mc_count; i < MCAST_MAX; i++) {
+ for (i = netdev_mc_count(dev); i < MCAST_MAX; i++) {
iowrite16(0xffff, ioaddr + MID_0L + 8*i);
iowrite16(0xffff, ioaddr + MID_0M + 8*i);
iowrite16(0xffff, ioaddr + MID_0H + 8*i);
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
/* Update individual M_CAST address list */
- if ((!sp->m_cast_flg) && dev->mc_count) {
- if (dev->mc_count >
+ if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
+ if (netdev_mc_count(dev) >
(config->max_mc_addr - config->max_mac_addr)) {
DBG_PRINT(ERR_DBG,
"%s: No more Rx filters can be added - "
}
prev_cnt = sp->mc_addr_count;
- sp->mc_addr_count = dev->mc_count;
+ sp->mc_addr_count = netdev_mc_count(dev);
/* Clear out the previous list of Mc in the H/W. */
for (i = 0; i < prev_cnt; i++) {
}
/* Create the new Rx filter list and update the same in H/W. */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
ETH_ALEN);
u32 mar0 = 0, mar1 = 0;
if ((dev->flags & IFF_PROMISC) ||
- dev->mc_count > multicast_filter_limit ||
+ netdev_mc_count(dev) > multicast_filter_limit ||
(dev->flags & IFF_ALLMULTI))
mar0 = mar1 = 0xffffffff;
else if (dev->flags & IFF_MULTICAST) {
memset(mc_hash, 0xff, sizeof(*mc_hash));
} else {
memset(mc_hash, 0x00, sizeof(*mc_hash));
- for (i = 0; i < net_dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(net_dev); i++) {
crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
set_bit_le(bit, mc_hash->byte);
if(dev->flags & IFF_PROMISC)
sp->mode = SEEQ_RCMD_RANY;
- else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count)
+ else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
sp->mode = SEEQ_RCMD_RBMCAST;
else
sp->mode = SEEQ_RCMD_RBCAST;
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int bit_nr =
ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
rx_mode = RFPromiscuous;
for (i = 0; i < table_entries; i++)
mc_filter[i] = 0xffff;
- } else if ((net_dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(net_dev) > multicast_filter_limit) ||
(net_dev->flags & IFF_ALLMULTI)) {
/* too many multicast addresses or accept all multicast packet */
rx_mode = RFAAB | RFAAM;
struct dev_mc_list *mclist;
rx_mode = RFAAB;
for (i = 0, mclist = net_dev->mc_list;
- mclist && i < net_dev->mc_count;
+ mclist && i < netdev_mc_count(net_dev);
i++, mclist = mclist->next) {
unsigned int bit_nr =
sis900_mcast_bitnr(mclist->dmi_addr, sis_priv->chipset_rev);
if (dev->flags & IFF_ALLMULTI) {
mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
- } else if (dev->mc_count > 0) {
- if (dev->mc_count <= FPMAX_MULTICAST) {
+ } else if (!netdev_mc_empty(dev)) {
+ if (netdev_mc_count(dev) <= FPMAX_MULTICAST) {
/* use exact filtering */
// point to first multicast addr
dmi = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
mac_add_multicast(smc,
(struct fddi_addr *)dmi->dmi_addr,
1);
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
int port = skge->port;
- int i, count = dev->mc_count;
+ int i, count = netdev_mc_count(dev);
struct dev_mc_list *list = dev->mc_list;
u32 mode;
u8 filter[8];
reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
else if (dev->flags & IFF_ALLMULTI) /* all multicast */
memset(filter, 0xff, sizeof(filter));
- else if (dev->mc_count == 0 && !rx_pause)/* no multicast */
+ else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */
reg &= ~GM_RXCR_MCF_ENA;
else {
int i;
if (rx_pause)
yukon_add_filter(filter, pause_mc_addr);
- for (i = 0; list && i < dev->mc_count; i++, list = list->next)
+ for (i = 0; list && i < netdev_mc_count(dev); i++, list = list->next)
yukon_add_filter(filter, list->dmi_addr);
}
reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
else if (dev->flags & IFF_ALLMULTI)
memset(filter, 0xff, sizeof(filter));
- else if (dev->mc_count == 0 && !rx_pause)
+ else if (netdev_mc_empty(dev) && !rx_pause)
reg &= ~GM_RXCR_MCF_ENA;
else {
int i;
if (rx_pause)
sky2_add_filter(filter, pause_mc_addr);
- for (i = 0; list && i < dev->mc_count; i++, list = list->next)
+ for (i = 0; list && i < netdev_mc_count(dev); i++, list = list->next)
sky2_add_filter(filter, list->dmi_addr);
}
* I don't need to zero the multicast table, because the flag is
* checked before the table is
*/
- else if (dev->flags & IFF_ALLMULTI || dev->mc_count > 16) {
+ else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name);
mcr |= MAC_CR_MCPAS_;
}
* the number of the 32 bit register, while the low 5 bits are the bit
* within that register.
*/
- else if (dev->mc_count) {
+ else if (!netdev_mc_empty(dev)) {
int i;
struct dev_mc_list *cur_addr;
memset(multicast_table, 0, sizeof(multicast_table));
cur_addr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
+ for (i = 0; i < netdev_mc_count(dev); i++, cur_addr = cur_addr->next) {
u32 position;
/* do we have a pointer here? */
/* We just get all multicast packets even if we only want them
. from one source. This will be changed at some future
. point. */
- else if (dev->mc_count ) {
+ else if (!netdev_mc_empty(dev)) {
/* support hardware multicasting */
/* be sure I get rid of flags I might have set */
ioaddr + RCR );
/* NOTE: this has to set the bank, so make sure it is the
last thing called. The bank is set to zero at the top */
- smc_setmulticast( ioaddr, dev->mc_count, dev->mc_list );
+ smc_setmulticast(ioaddr, netdev_mc_count(dev), dev->mc_list);
}
else {
outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL),
* I don't need to zero the multicast table, because the flag is
* checked before the table is
*/
- else if (dev->flags & IFF_ALLMULTI || dev->mc_count > 16) {
+ else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
DBG(2, "%s: RCR_ALMUL\n", dev->name);
lp->rcr_cur_mode |= RCR_ALMUL;
}
* the number of the 8 bit register, while the low 3 bits are the bit
* within that register.
*/
- else if (dev->mc_count) {
+ else if (!netdev_mc_empty(dev)) {
int i;
struct dev_mc_list *cur_addr;
memset(multicast_table, 0, sizeof(multicast_table));
cur_addr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
+ for (i = 0; i < netdev_mc_count(dev); i++, cur_addr = cur_addr->next) {
int position;
/* do we have a pointer here? */
pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_HPFILT_);
pdata->hashhi = 0;
pdata->hashlo = 0;
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
/* Enabling specific multicast addresses */
unsigned int hash_high = 0;
unsigned int hash_low = 0;
}
mc_list = mc_list->next;
}
- if (count != (unsigned int)dev->mc_count)
+ if (count != (unsigned int)netdev_mc_count(dev))
SMSC_WARNING(DRV, "mc_count != dev->mc_count");
pdata->hashhi = hash_high;
mac_cr &= (~MAC_CR_PRMS_);
mac_cr |= MAC_CR_MCPAS_;
mac_cr &= (~MAC_CR_HPFILT_);
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *mc_list = dev->mc_list;
u32 hash_lo = 0, hash_hi = 0;
if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
rcr |= SONIC_RCR_PRO;
} else {
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 15)) {
+ if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > 15)) {
rcr |= SONIC_RCR_AMC;
} else {
if (sonic_debug > 2)
- printk("sonic_multicast_list: mc_count %d\n", dev->mc_count);
+ printk("sonic_multicast_list: mc_count %d\n",
+ netdev_mc_count(dev));
sonic_set_cam_enable(dev, 1); /* always enable our own address */
- for (i = 1; i <= dev->mc_count; i++) {
+ for (i = 1; i <= netdev_mc_count(dev); i++) {
addr = dmi->dmi_addr;
dmi = dmi->next;
sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
rx_mode |= AcceptAll;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
- } else if (dev->mc_count <= 14) {
+ } else if (netdev_mc_count(dev) <= 14) {
/* Use the 16 element perfect filter, skip first two entries. */
void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
__be16 *eaddrs;
- for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2;
+ for (i = 2, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev) + 2;
i++, mclist = mclist->next) {
eaddrs = (__be16 *)mclist->dmi_addr;
writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
/* The chip uses the upper 9 CRC bits
as index into the hash table */
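The starfire loop body is truncated above; per the comment, the top 9 bits of the little-endian CRC index a 512-bit table held in the 32 __le16 words of mc_filter. A reconstructed sketch, with the exact expressions treated as an assumption:

int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23; /* keep top 9 bits */
mc_filter[bit_nr >> 4] |= cpu_to_le16(1 << (bit_nr & 15));   /* 32 words x 16 bits */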
value |= MAC_CONTROL_PR;
value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
MAC_CONTROL_HP);
- } else if ((dev->mc_count > HASH_TABLE_SIZE)
+ } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
|| (dev->flags & IFF_ALLMULTI)) {
value |= MAC_CONTROL_PM;
value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
writel(0xffffffff, ioaddr + MAC_HASH_LOW);
- } else if (dev->mc_count == 0) { /* no multicast */
+ } else if (netdev_mc_empty(dev)) { /* no multicast */
value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
MAC_CONTROL_HO | MAC_CONTROL_HP);
} else {
memset(mc_filter, 0, sizeof(mc_filter));
for (i = 0, mclist = dev->mc_list;
- mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ mclist && i < netdev_mc_count(dev); i++, mclist = mclist->next) {
/* The upper 6 bits of the calculated CRC are used to
* index the contens of the hash table */
int bit_nr =
unsigned int value = 0;
DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
- __func__, dev->mc_count, netdev_uc_count(dev));
+ __func__, netdev_mc_count(dev), netdev_uc_count(dev));
if (dev->flags & IFF_PROMISC)
value = GMAC_FRAME_FILTER_PR;
- else if ((dev->mc_count > HASH_TABLE_SIZE)
+ else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
|| (dev->flags & IFF_ALLMULTI)) {
value = GMAC_FRAME_FILTER_PM; /* pass all multi */
writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
int i;
u32 mc_filter[2];
struct dev_mc_list *mclist;
memset(mc_filter, 0, sizeof(mc_filter));
for (i = 0, mclist = dev->mc_list;
- mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ mclist && i < netdev_mc_count(dev); i++, mclist = mclist->next) {
/* The upper 6 bits of the calculated CRC are used to
index the contens of the hash table */
int bit_nr =
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
struct dev_mc_list *dmi=dev->mc_list;
- int num_addrs=dev->mc_count;
+ int num_addrs=netdev_mc_count(dev);
ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
} else {
short multicast_table[4];
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
int i;
/* We don't use the multicast table, but rely on upper-layer
* filtering. */
while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0)
udelay(20);
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
sbus_writel(0xffff, bregs + BMAC_HTABLE0);
sbus_writel(0xffff, bregs + BMAC_HTABLE1);
sbus_writel(0xffff, bregs + BMAC_HTABLE2);
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
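The SBus MAC hunks here (sunbmac above, sunhme and sunqe below) all fill a 64-bit hash spread over four 16-bit HTABLE registers. A sketch of the per-address step they share, with the exact shifts treated as an assumption:

u32 crc = ether_crc_le(ETH_ALEN, addrs);
crc >>= 26;                               /* keep the upper 6 bits: 0..63 */
hash_table[crc >> 4] |= 1 << (crc & 0xf); /* pick one of 4 x 16-bit words */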
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *mclist;
int bit;
int index;
int crc;
memset (mc_filter, 0, sizeof (mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
int i;
if ((gp->dev->flags & IFF_ALLMULTI) ||
- (gp->dev->mc_count > 256)) {
+ (netdev_mc_count(gp->dev) > 256)) {
for (i=0; i<16; i++)
writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
rxcfg |= MAC_RXCFG_HFE;
for (i = 0; i < 16; i++)
hash_table[i] = 0;
- for (i = 0; i < gp->dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(gp->dev); i++) {
char *addrs = dmi->dmi_addr;
dmi = dmi->next;
HMD(("htable, "));
if ((hp->dev->flags & IFF_ALLMULTI) ||
- (hp->dev->mc_count > 64)) {
+ (netdev_mc_count(hp->dev) > 64)) {
hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < hp->dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(hp->dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
spin_lock_irq(&hp->happy_lock);
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
return;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
/* Lock out others. */
netif_stop_queue(dev);
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
qep->mregs + MREGS_IACONFIG);
while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
/* Enable promiscuous mode */
tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
} else if ((dev->flags & IFF_ALLMULTI) ||
- dev->mc_count > CAM_ENTRY_MAX - 3) {
+ netdev_mc_count(dev) > CAM_ENTRY_MAX - 3) {
/* CAM 0, 1, 20 are reserved. */
/* Disable promiscuous mode, use normal mode. */
tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *cur_addr = dev->mc_list;
int i;
int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
tc_writel(0, &tr->CAM_Ctl);
/* Walk the address list, and load the filter */
- for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
+ for (i = 0; i < netdev_mc_count(dev); i++, cur_addr = cur_addr->next) {
if (!cur_addr)
break;
/* entry 0,1 is reserved. */
/* set IMF to accept all multicast frmaes */
for (i = 0; i < MAC_MCST_HASH_NUM; i++)
WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
- } else if (ndev->mc_count) {
+ } else if (!netdev_mc_empty(ndev)) {
u8 hash;
struct dev_mc_list *mclist;
u32 reg, val;
}
} else {
- DBG("only own mac %d\n", ndev->mc_count);
+ DBG("only own mac %d\n", netdev_mc_count(ndev));
rxf_val |= GMAC_RX_FILTER_AB;
}
WRITE_REG(priv, regGMAC_RXF_A, rxf_val);
} else if (dev->flags & IFF_ALLMULTI) {
/* Accept all multicast. */
tg3_set_multi (tp, 1);
- } else if (dev->mc_count < 1) {
+ } else if (netdev_mc_empty(dev)) {
/* Reject all multicast. */
tg3_set_multi (tp, 0);
} else {
u32 bit;
u32 crc;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF );
TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
} else {
- for ( i = 0; i < dev->mc_count; i++ ) {
+ for ( i = 0; i < netdev_mc_count(dev); i++ ) {
if ( i < 3 ) {
TLan_SetMac( dev, i + 1,
(char *) &dmi->dmi_addr );
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
+ for (i=0,dmi=dev->mc_list;i < netdev_mc_count(dev); i++,dmi = dmi->next) {
dev_mc_address[0] |= dmi->dmi_addr[2] ;
dev_mc_address[1] |= dmi->dmi_addr[3] ;
dev_mc_address[2] |= dmi->dmi_addr[4] ;
if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return;
address[0] = address[1] = address[2] = address[3] = 0;
mclist = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
address[0] |= mclist->dmi_addr[2];
address[1] |= mclist->dmi_addr[3];
address[2] |= mclist->dmi_addr[4];
writel(streamer_priv->srb,streamer_mmio+LAPA);
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next)
+ for (i=0,dmi=dev->mc_list;i < netdev_mc_count(dev); i++,dmi = dmi->next)
{
dev_mc_address[0] |= dmi->dmi_addr[2] ;
dev_mc_address[1] |= dmi->dmi_addr[3] ;
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
+ for (i=0,dmi=dev->mc_list;i < netdev_mc_count(dev); i++,dmi = dmi->next) {
dev_mc_address[0] |= dmi->dmi_addr[2] ;
dev_mc_address[1] |= dmi->dmi_addr[3] ;
dev_mc_address[2] |= dmi->dmi_addr[4] ;
{
int i;
struct dev_mc_list *mclist = dev->mc_list;
- for (i=0; i< dev->mc_count; i++)
+ for (i=0; i< netdev_mc_count(dev); i++)
{
((char *)(&tp->ocpl.FunctAddr))[0] |=
mclist->dmi_addr[2];
rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);
- if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+ if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
int i;
struct dev_mc_list *mc = dev->mc_list;
rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
eaddrs = (u16 *)mclist->dmi_addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
goto out;
}
- if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter well -- accept all multicasts. */
macmode |= AcceptAllMulticast;
goto out;
/* Note that only the low-address shortword of setup_frame is valid!
The values are doubled for big-endian architectures. */
- if (dev->mc_count > 14) /* Must use a multicast hash table. */
+ if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */
build_setup_frame_hash (de->setup_frame, dev);
else
build_setup_frame_perfect (de->setup_frame, dev);
omr &= ~(OMR_PR | OMR_PM);
pa = build_setup_frame(dev, ALL); /* Build the basic frame */
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
omr |= OMR_PM; /* Pass all multicasts */
} else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
- for (i=0;i<dev->mc_count;i++) { /* for each address in the list */
+ for (i = 0; i < netdev_mc_count(dev); i++) { /* for each address in the list */
addrs=dmi->dmi_addr;
dmi=dmi->next;
if ((*addrs & 0x01) == 1) { /* multicast address? */
}
}
} else { /* Perfect filtering */
- for (j=0; j<dev->mc_count; j++) {
+ for (j=0; j<netdev_mc_count(dev); j++) {
addrs=dmi->dmi_addr;
dmi=dmi->next;
for (i=0; i<ETH_ALEN; i++) {
/* Send setup frame */
if (db->chip_id == PCI_DM9132_ID)
- dm9132_id_table(dev, dev->mc_count); /* DM9132 */
+ dm9132_id_table(dev, netdev_mc_count(dev)); /* DM9132 */
else
- send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
+ send_filter_frame(dev, netdev_mc_count(dev)); /* DM9102/DM9102A */
/* Init CR7, interrupt active bit */
db->cr7_data = CR7_DEFAULT;
{
struct dmfe_board_info *db = netdev_priv(dev);
unsigned long flags;
+ int mc_count = netdev_mc_count(dev);
DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
spin_lock_irqsave(&db->lock, flags);
return;
}
- if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
- DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
+ if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
+ DMFE_DBUG(0, "Pass all multicast address", mc_count);
db->cr6_data &= ~(CR6_PM | CR6_PBF);
db->cr6_data |= CR6_PAM;
spin_unlock_irqrestore(&db->lock, flags);
return;
}
- DMFE_DBUG(0, "Set multicast address", dev->mc_count);
+ DMFE_DBUG(0, "Set multicast address", mc_count);
if (db->chip_id == PCI_DM9132_ID)
- dm9132_id_table(dev, dev->mc_count); /* DM9132 */
+ dm9132_id_table(dev, mc_count); /* DM9132 */
else
- send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
+ send_filter_frame(dev, mc_count); /* DM9102/DM9102A */
spin_unlock_irqrestore(&db->lock, flags);
}
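As in the el3 hunk earlier, dmfe now reads the count once into a local and branches on it. A minimal sketch of the resulting receive-filter ladder, using a hypothetical driver name and hardware limit:

#include <linux/netdevice.h>

/* Hypothetical driver: shows the ladder shape only, not a real device. */
static void foo_set_rx_mode(struct net_device *dev)
{
	int mc_count = netdev_mc_count(dev);	/* read once, reuse below */

	if (dev->flags & IFF_PROMISC) {
		/* accept everything */
	} else if ((dev->flags & IFF_ALLMULTI) || mc_count > 14 /* hw limit */) {
		/* too many to filter exactly: accept all multicast */
	} else if (!netdev_mc_empty(dev)) {
		/* walk dev->mc_list and program mc_count filter entries */
	} else {
		/* unicast and broadcast only */
	}
}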
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
eaddrs = (u16 *)mclist->dmi_addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
csr6 |= AcceptAllMulticast | AcceptAllPhys;
- } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 1000) ||
+ (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter well -- accept all multicasts. */
tp->csr6 |= AcceptAllMulticast;
csr6 |= AcceptAllMulticast;
/* Should verify correctness on big-endian/__powerpc__ */
struct dev_mc_list *mclist;
int i;
- if (dev->mc_count > 64) { /* Arbitrary non-effective limit. */
+ if (netdev_mc_count(dev) > 64) {
+ /* Arbitrary non-effective limit. */
tp->csr6 |= AcceptAllMulticast;
csr6 |= AcceptAllMulticast;
} else {
u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
int filterbit;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < netdev_mc_count(dev);
+ i++, mclist = mclist->next) {
if (tp->flags & COMET_MAC_ADDR)
filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
else
/* Note that only the low-address shortword of setup_frame is valid!
The values are doubled for big-endian architectures. */
- if (dev->mc_count > 14) { /* Must use a multicast hash table. */
+ if (netdev_mc_count(dev) > 14) {
+ /* Must use a multicast hash table. */
build_setup_frame_hash(tp->setup_frame, dev);
tx_flags = 0x08400000 | 192;
} else {
update_cr6(db->cr6_data, ioaddr);
/* Send setup frame */
- send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */
+ send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */
/* Init CR7, interrupt active bit */
db->cr7_data = CR7_DEFAULT;
return;
}
- if (dev->flags & IFF_ALLMULTI || dev->mc_count > ULI5261_MAX_MULTICAST) {
- ULI526X_DBUG(0, "Pass all multicast address", dev->mc_count);
+ if (dev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(dev) > ULI5261_MAX_MULTICAST) {
+ ULI526X_DBUG(0, "Pass all multicast address",
+ netdev_mc_count(dev));
db->cr6_data &= ~(CR6_PM | CR6_PBF);
db->cr6_data |= CR6_PAM;
spin_unlock_irqrestore(&db->lock, flags);
return;
}
- ULI526X_DBUG(0, "Set multicast address", dev->mc_count);
- send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */
+ ULI526X_DBUG(0, "Set multicast address", netdev_mc_count(dev));
+ send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */
spin_unlock_irqrestore(&db->lock, flags);
}
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
| AcceptMyPhys;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
struct dev_mc_list *mclist;
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < netdev_mc_count(dev);
+ i++, mclist = mclist->next) {
int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
filterbit &= 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
if(dev->flags & IFF_PROMISC) {
filter |= TYPHOON_RX_FILTER_PROMISCOUS;
- } else if((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
filter |= TYPHOON_RX_FILTER_ALL_MCAST;
- } else if(dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *mclist;
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < netdev_mc_count(dev);
+ i++, mclist = mclist->next) {
int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}
dmi = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
+ for (i = 0; i < netdev_mc_count(dev);
+ i++, dmi = dmi->next) {
/* Only support group multicast for now.
*/
if (net->flags & IFF_PROMISC) {
rx_ctl |= AX_RX_CTL_PRO;
} else if (net->flags & IFF_ALLMULTI ||
- net->mc_count > AX_MAX_MCAST) {
+ netdev_mc_count(net) > AX_MAX_MCAST) {
rx_ctl |= AX_RX_CTL_AMALL;
- } else if (net->mc_count == 0) {
+ } else if (netdev_mc_empty(net)) {
/* just broadcast and directed */
} else {
/* We use the 20 byte dev->data
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- for (i = 0; i < net->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(net); i++) {
crc_bits =
ether_crc(ETH_ALEN,
mc_list->dmi_addr) >> 26;
if (net->flags & IFF_PROMISC) {
rx_ctl |= 0x01;
} else if (net->flags & IFF_ALLMULTI ||
- net->mc_count > AX_MAX_MCAST) {
+ netdev_mc_count(net) > AX_MAX_MCAST) {
rx_ctl |= 0x02;
- } else if (net->mc_count == 0) {
+ } else if (netdev_mc_empty(net)) {
/* just broadcast and directed */
} else {
/* We use the 20 byte dev->data
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- for (i = 0; i < net->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(net); i++) {
crc_bits =
ether_crc(ETH_ALEN,
mc_list->dmi_addr) >> 26;
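The asix loop body is cut off above; judging by the identical mcs7830 hunk further down, it presumably continues by setting one bit in the 8-byte filter and advancing the list (field names assumed from that pattern):

crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;   /* top 6 bits */
data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7);  /* 8 bytes x 8 bits */
mc_list = mc_list->next;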
if (netdev->flags & IFF_ALLMULTI) {
memset(catc->multicast, 0xff, 64);
} else {
- for (i = 0, mc = netdev->mc_list; mc && i < netdev->mc_count; i++, mc = mc->next) {
+ for (i = 0, mc = netdev->mc_list;
+ mc && i < netdev_mc_count(netdev);
+ i++, mc = mc->next) {
u32 crc = ether_crc_le(6, mc->dmi_addr);
if (!catc->is_f5u011) {
catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
/* do not expect to see traffic of other PLCs */
filter |= PACKET_TYPE_PROMISCUOUS;
devinfo(dev, "promiscuous mode enabled");
- } else if (netdev->mc_count ||
+ } else if (!netdev_mc_empty(netdev) ||
(netdev->flags & IFF_ALLMULTI)) {
filter |= PACKET_TYPE_ALL_MULTICAST;
devdbg(dev, "receive all multicast enabled");
if (net->flags & IFF_PROMISC) {
packet_filter_bitmap |= KAWETH_PACKET_FILTER_PROMISCUOUS;
}
- else if ((net->mc_count) || (net->flags & IFF_ALLMULTI)) {
+ else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) {
packet_filter_bitmap |= KAWETH_PACKET_FILTER_ALL_MULTICAST;
}
if (net->flags & IFF_PROMISC) {
data->config |= HIF_REG_CONFIG_PROMISCUOUS;
} else if (net->flags & IFF_ALLMULTI ||
- net->mc_count > MCS7830_MAX_MCAST) {
+ netdev_mc_count(net) > MCS7830_MAX_MCAST) {
data->config |= HIF_REG_CONFIG_ALLMULTICAST;
- } else if (net->mc_count == 0) {
+ } else if (netdev_mc_empty(net)) {
/* just broadcast and directed */
} else {
/* We use the 20 byte dev->data
int i;
/* Build the multicast hash filter. */
- for (i = 0; i < net->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(net); i++) {
crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7);
mc_list = mc_list->next;
pegasus->eth_regs[EthCtrl2] |= RX_PROMISCUOUS;
if (netif_msg_link(pegasus))
pr_info("%s: Promiscuous mode enabled.\n", net->name);
- } else if (net->mc_count || (net->flags & IFF_ALLMULTI)) {
+ } else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) {
pegasus->eth_regs[EthCtrl0] |= RX_MULTICAST;
pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
if (netif_msg_link(pegasus))
if (netdev->flags & IFF_PROMISC) {
dev->rx_creg |= cpu_to_le16(0x0001);
dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name);
- } else if (netdev->mc_count ||
+ } else if (!netdev_mc_empty(netdev) ||
(netdev->flags & IFF_ALLMULTI)) {
dev->rx_creg &= cpu_to_le16(0xfffe);
dev->rx_creg |= cpu_to_le16(0x0002);
devdbg(dev, "receive all multicast enabled");
pdata->mac_cr |= MAC_CR_MCPAS_;
pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_HPFILT_);
- } else if (dev->net->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev->net)) {
struct dev_mc_list *mc_list = dev->net->mc_list;
int count = 0;
mc_list = mc_list->next;
}
- if (count != ((u32)dev->net->mc_count))
+ if (count != ((u32) netdev_mc_count(dev->net)))
devwarn(dev, "mc_count != dev->mc_count");
if (netif_msg_drv(dev))
rx_mode = 0x1C;
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
struct dev_mc_list *mclist;
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
writel(0xffffffff, &regs->MARCAM[0]);
writel(0xffffffff, &regs->MARCAM[4]);
rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
- } else if ((dev->mc_count > vptr->multicast_limit) ||
+ } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
(dev->flags & IFF_ALLMULTI)) {
writel(0xffffffff, &regs->MARCAM[0]);
writel(0xffffffff, &regs->MARCAM[4]);
int offset = MCAM_SIZE - vptr->multicast_limit;
mac_get_cam_mask(regs, vptr->mCAMmask);
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < netdev_mc_count(dev);
+ i++, mclist = mclist->next) {
mac_set_cam(regs, i + offset, mclist->dmi_addr);
vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
}
struct dev_addr_list *addr;
struct netdev_hw_addr *ha;
int uc_count;
+ int mc_count;
void *buf;
int i;
allmulti ? "en" : "dis");
uc_count = netdev_uc_count(dev);
+ mc_count = netdev_mc_count(dev);
/* MAC filter - use one buffer for both lists */
- mac_data = buf = kzalloc(((uc_count + dev->mc_count) * ETH_ALEN) +
- (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
+ buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
+ (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
+ mac_data = buf;
if (!buf) {
dev_warn(&dev->dev, "No memory for MAC address buffer\n");
return;
/* multicast list and count fill the end */
mac_data = (void *)&mac_data->macs[uc_count][0];
- mac_data->entries = dev->mc_count;
+ mac_data->entries = mc_count;
addr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, addr = addr->next)
+ for (i = 0; i < mc_count; i++, addr = addr->next)
memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);
sg_set_buf(&sg[1], mac_data,
- sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN));
+ sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_TABLE_SET,
vmxnet3_copy_mc(struct net_device *netdev)
{
u8 *buf = NULL;
- u32 sz = netdev->mc_count * ETH_ALEN;
+ u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
if (sz <= 0xffff) {
int i;
struct dev_mc_list *mc = netdev->mc_list;
- for (i = 0; i < netdev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(netdev); i++) {
BUG_ON(!mc);
memcpy(buf + i * ETH_ALEN, mc->dmi_addr,
ETH_ALEN);
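Because Vmxnet3_RxFilterConf.mfTableLen is a u16, the size check above bounds how many addresses can be copied. A quick arithmetic sketch of the limit (interpretation assumed):

u32 sz = netdev_mc_count(netdev) * ETH_ALEN;	/* 6 bytes per address */
/* sz must fit a u16: at most 0xffff / ETH_ALEN == 10922 addresses;
 * for larger lists no table is built and buf stays NULL. */
bool fits = (sz <= 0xffff);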
if (netdev->flags & IFF_ALLMULTI)
new_mode |= VMXNET3_RXM_ALL_MULTI;
else
- if (netdev->mc_count > 0) {
+ if (!netdev_mc_empty(netdev)) {
new_table = vmxnet3_copy_mc(netdev);
if (new_table) {
new_mode |= VMXNET3_RXM_MCAST;
rxConf->mfTableLen = cpu_to_le16(
- netdev->mc_count * ETH_ALEN);
+ netdev_mc_count(netdev) * ETH_ALEN);
rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
new_table));
} else {
memset(&mac_info, 0, sizeof(struct macInfo));
/* Update individual M_CAST address list */
- if ((!vdev->all_multi_flg) && dev->mc_count) {
+ if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
list_head = &vdev->vpaths[0].mac_addr_list;
- if ((dev->mc_count +
+ if ((netdev_mc_count(dev) +
(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
vdev->vpaths[0].max_mac_addr_cnt)
goto _set_all_mcast;
}
/* Add new ones */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN);
airo_set_promisc(ai);
}
- if ((dev->flags&IFF_ALLMULTI)||dev->mc_count>0) {
+ if ((dev->flags&IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
/* Turn on multicast. (Should be already setup...) */
}
}
/* The Hermes doesn't seem to have an allmulti mode, so we go
* into promiscuous mode and let the upper levels deal. */
if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > MAX_MULTICAST(priv))) {
+ (netdev_mc_count(dev) > MAX_MULTICAST(priv))) {
promisc = 1;
mc_count = 0;
} else {
promisc = 0;
- mc_count = dev->mc_count;
+ mc_count = netdev_mc_count(dev);
}
err = __orinoco_hw_set_multicast_list(priv, dev->mc_list, mc_count,
if (dev->flags & IFF_ALLMULTI)
ray_update_multi_list(dev, 1);
else {
- if (local->num_multi != dev->mc_count)
+ if (local->num_multi != netdev_mc_count(dev))
ray_update_multi_list(dev, 0);
}
} /* end set_multicast_list */
filter |= RNDIS_PACKET_TYPE_PROMISCUOUS |
RNDIS_PACKET_TYPE_ALL_LOCAL;
} else if (usbdev->net->flags & IFF_ALLMULTI ||
- usbdev->net->mc_count > priv->multicast_size) {
+ netdev_mc_count(usbdev->net) > priv->multicast_size) {
filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST;
- } else if (usbdev->net->mc_count > 0) {
- size = min(priv->multicast_size, usbdev->net->mc_count);
+ } else if (!netdev_mc_empty(usbdev->net)) {
+ size = min(priv->multicast_size, netdev_mc_count(usbdev->net));
buf = kmalloc(size * ETH_ALEN, GFP_KERNEL);
if (!buf) {
devwarn(usbdev,
unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI];
int i;
- if (dev->mc_count > ZD1201_MAXMULTI)
+ if (netdev_mc_count(dev) > ZD1201_MAXMULTI)
return;
- for (i=0; i<dev->mc_count; i++) {
+ for (i=0; i<netdev_mc_count(dev); i++) {
memcpy(reqbuf+i*ETH_ALEN, mc->dmi_addr, ETH_ALEN);
mc = mc->next;
}
zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf,
- dev->mc_count*ETH_ALEN, 0);
-
+ netdev_mc_count(dev) * ETH_ALEN, 0);
}
static int zd1201_config_commit(struct net_device *dev,
iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
iowrite16(0x000F, ioaddr + AddrMode);
- } else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 64) ||
+ (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter well, or accept all multicasts. */
iowrite16(0x000B, ioaddr + AddrMode);
- } else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
+ } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
struct dev_mc_list *mclist;
u16 hash_table[4];
int i;
memset(hash_table, 0, sizeof(hash_table));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
unsigned int bit;
adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
}
- if (netdev->mc_count > NIC_MAX_MCAST_LIST) {
+ if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST) {
adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
}
- if (netdev->mc_count < 1) {
+ if (netdev_mc_count(netdev) < 1) {
adapter->PacketFilter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
adapter->PacketFilter &= ~ET131X_PACKET_TYPE_MULTICAST;
} else {
}
/* Set values in the private adapter struct */
- adapter->MCAddressCount = netdev->mc_count;
+ adapter->MCAddressCount = netdev_mc_count(netdev);
- if (netdev->mc_count) {
- count = netdev->mc_count - 1;
+ if (!netdev_mc_empty(netdev)) {
+ count = netdev_mc_count(netdev) - 1;
memcpy(adapter->MCList[count], mclist->dmi_addr, ETH_ALEN);
}
#ifdef PCMCIA_DEBUG
{
static int old;
- if (old != dev->mc_count) {
- old = dev->mc_count;
+ if (old != netdev_mc_count(dev)) {
+ old = netdev_mc_count(dev);
pr_debug("%s: setting Rx mode to %d addresses.\n",
- dev->name, dev->mc_count);
+ dev->name, netdev_mc_count(dev));
}
}
#endif
- if (dev->mc_count || (dev->flags & IFF_ALLMULTI)) {
+ if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) {
/* Multicast Mode */
rcvMode = rxConfRxEna + rxConfAMP + rxConfBcast;
} else if (dev->flags & IFF_PROMISC) {
int i;
char *addresses;
struct dev_mc_list *mc_list = dev->mc_list;
- int mc_count = dev->mc_count;
+ int mc_count = netdev_mc_count(dev);
ASSERT(adapter);
/* Unconditionally log net taps. */
pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST);
}
- else if ((dev->mc_count > pDevice->multicast_limit)
+ else if ((netdev_mc_count(dev) > pDevice->multicast_limit)
|| (dev->flags & IFF_ALLMULTI)) {
MACvSelectPage1(pDevice->PortOffset);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, 0xffffffff);
}
else {
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
// Unconditionally log net taps.
pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST);
}
- else if ((dev->mc_count > pDevice->multicast_limit) || (dev->flags & IFF_ALLMULTI)) {
+ else if ((netdev_mc_count(dev) > pDevice->multicast_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
CONTROLnsRequestOut(pDevice,
MESSAGE_TYPE_WRITE,
MAC_REG_MAR0,
}
else {
memset(mc_filter, 0, sizeof(mc_filter));
- for (ii = 0, mclist = dev->mc_list; mclist && ii < dev->mc_count;
+ for (ii = 0, mclist = dev->mc_list; mclist && ii < netdev_mc_count(dev);
ii++, mclist = mclist->next) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
#ifdef DEBUG_IOCTL_INFO
printk(KERN_DEBUG
"%s: wavelan_set_multicast_list(): setting Rx mode %02X to %d addresses.\n",
- dev->name, dev->flags, dev->mc_count);
+ dev->name, dev->flags, netdev_mc_count(dev));
#endif
/* Are we asking for promiscuous mode,
* or too many multicast addresses for the hardware filter? */
if ((dev->flags & IFF_PROMISC) ||
(dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > I82586_MAX_MULTICAST_ADDRESSES)) {
+ (netdev_mc_count(dev) > I82586_MAX_MULTICAST_ADDRESSES)) {
/*
* Enable promiscuous mode: receive all packets.
*/
* in multicast list
*/
#ifdef MULTICAST_AVOID
- if (lp->promiscuous || (dev->mc_count != lp->mc_count))
+ if (lp->promiscuous || (netdev_mc_count(dev) != lp->mc_count))
#endif
{
lp->promiscuous = 0;
- lp->mc_count = dev->mc_count;
+ lp->mc_count = netdev_mc_count(dev);
wv_82586_reconfig(dev);
}
#ifdef DEBUG_IOCTL_INFO
printk(KERN_DEBUG "%s: wavelan_set_multicast_list(): setting Rx mode %02X to %d addresses.\n",
- dev->name, dev->flags, dev->mc_count);
+ dev->name, dev->flags, netdev_mc_count(dev));
#endif
if(dev->flags & IFF_PROMISC)
/* If all multicast addresses
* or too much multicast addresses for the hardware filter */
if((dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > I82593_MAX_MULTICAST_ADDRESSES))
+ (netdev_mc_count(dev) > I82593_MAX_MULTICAST_ADDRESSES))
{
/*
* Disable promiscuous mode, but active the all multicast mode
*/
#ifdef MULTICAST_AVOID
if(lp->promiscuous || lp->allmulticast ||
- (dev->mc_count != lp->mc_count))
+ (netdev_mc_count(dev) != lp->mc_count))
#endif
{
lp->promiscuous = 0;
lp->allmulticast = 0;
- lp->mc_count = dev->mc_count;
+ lp->mc_count = netdev_mc_count(dev);
wv_82593_reconfig(dev);
}
if(!wv_82593_cmd(dev, "wv_82593_config(): mc-setup",
OP0_MC_SETUP, SR0_MC_SETUP_DONE))
ret = FALSE;
- lp->mc_count = dev->mc_count; /* remember to avoid repeated reset */
+ /* remember to avoid repeated reset */
+ lp->mc_count = netdev_mc_count(dev);
}
/* Job done, clear the flag */
( dev->flags & IFF_MULTICAST ) ? "Multicast " : "",
( dev->flags & IFF_ALLMULTI ) ? "All-Multicast" : "" );
- DBG_PRINT( " mc_count: %d\n", dev->mc_count );
+ DBG_PRINT( " mc_count: %d\n", netdev_mc_count(dev));
- for( x = 0, mclist = dev->mc_list; mclist && x < dev->mc_count;
+ for( x = 0, mclist = dev->mc_list; mclist && x < netdev_mc_count(dev);
x++, mclist = mclist->next ) {
DBG_PRINT( " %s (%d)\n", DbgHwAddr(mclist->dmi_addr),
mclist->dmi_addrlen );
DBG_PRINT( "Enabling Promiscuous mode (IFF_PROMISC)\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
}
- else if(( dev->mc_count > HCF_MAX_MULTICAST ) ||
+ else if ((netdev_mc_count(dev) > HCF_MAX_MULTICAST) ||
( dev->flags & IFF_ALLMULTI )) {
/* Shutting off this filter will enable all multicast frames to
be sent up from the device; however, this is a static RID, so
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
wl_apply( lp );
}
- else if( dev->mc_count != 0 ) {
+ else if (!netdev_mc_empty(dev)) {
/* Set the multicast addresses */
- lp->ltvRecord.len = ( dev->mc_count * 3 ) + 1;
+ lp->ltvRecord.len = ( netdev_mc_count(dev) * 3 ) + 1;
lp->ltvRecord.typ = CFG_GROUP_ADDR;
for( x = 0, mclist = dev->mc_list;
- ( x < dev->mc_count ) && ( mclist != NULL );
+ ( x < netdev_mc_count(dev)) && ( mclist != NULL );
x++, mclist = mclist->next ) {
memcpy( &( lp->ltvRecord.u.u8[x * ETH_ALEN] ),
mclist->dmi_addr, ETH_ALEN );
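In the wl hunk above the LTV record length is counted in 16-bit words: each ETH_ALEN (6-byte) address takes 3 words, plus one word for the type field, hence (count * 3) + 1. The same bookkeeping written out, with the interpretation treated as an assumption:

lp->ltvRecord.len = (netdev_mc_count(dev) * (ETH_ALEN / sizeof(u16))) + 1;
lp->ltvRecord.typ = CFG_GROUP_ADDR;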
struct sk_buff *skb;
int size;
- BT_DBG("%s mc_count %d", dev->name, dev->mc_count);
+ BT_DBG("%s mc_count %d", dev->name, netdev_mc_count(dev));
size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2;
skb = alloc_skb(size, GFP_ATOMIC);
/* FIXME: We should group addresses here. */
- for (i = 0; i < dev->mc_count && i < BNEP_MAX_MULTICAST_FILTERS; i++) {
+ for (i = 0;
+ i < netdev_mc_count(dev) && i < BNEP_MAX_MULTICAST_FILTERS;
+ i++) {
memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
dmi = dmi->next;
netif_addr_lock_bh(dev);
__dev_addr_discard(&dev->mc_list);
dev->mc_count = 0;
netif_addr_unlock_bh(dev);
}
/* Enable promiscuous mode */
IRDA_WARNING("Promiscuous mode not implemented by IrLAN!\n");
}
- else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) {
+ else if ((dev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(dev) > HW_MAX_ADDRS) {
/* Disable promiscuous mode, use normal mode. */
IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
/* hardware_set_filter(NULL); */
irlan_set_multicast_filter(self, TRUE);
}
- else if (dev->mc_count) {
+ else if (!netdev_mc_empty(dev)) {
IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
/* Walk the address list, and load the filter */
/* hardware_set_filter(dev->mc_list); */
netif_addr_lock_bh(dev);
spin_lock_bh(&local->filter_lock);
__dev_addr_unsync(&local->mc_list, &local->mc_count,
&dev->mc_list, &dev->mc_count);
spin_unlock_bh(&local->filter_lock);
netif_addr_unlock_bh(dev);
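For reference, the address-list helpers used in the final hunk update the destination count in place, which is why the count is passed by address rather than read through the accessor. Reconstructed prototypes, with the exact signatures treated as an assumption:

int  __dev_addr_sync(struct dev_addr_list **to, int *to_count,
		     struct dev_addr_list **from, int *from_count);
void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
		       struct dev_addr_list **from, int *from_count);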