rmb();
bnx2x_init_rx_rings(bp);
bnx2x_init_tx_rings(bp);
- /* Initialize MOD_ABS interrupts */
- bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
- bp->common.shmem_base, bp->common.shmem2_base,
- BP_PORT(bp));
+
+ if (IS_VF(bp)) {
+ bnx2x_memset_stats(bp);
+ return;
+ }
+
+ if (IS_PF(bp)) {
+ /* Initialize MOD_ABS interrupts */
+ bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
+ bp->common.shmem_base,
+ bp->common.shmem2_base, BP_PORT(bp));
+
+ /* initialize the default status block and sp ring */
+ bnx2x_init_def_sb(bp);
+ bnx2x_update_dsb_idx(bp);
+ bnx2x_init_sp_ring(bp);
+ }
+ }
- bnx2x_init_def_sb(bp);
- bnx2x_update_dsb_idx(bp);
- bnx2x_init_sp_ring(bp);
+ void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
+ {
bnx2x_init_eq_ring(bp);
bnx2x_init_internal(bp, load_code);
bnx2x_pf_init(bp);
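
The hunk above splits the NIC init into a pre-IRQ half (bnx2x_pre_irq_nic_init) and a post-IRQ half (bnx2x_post_irq_nic_init). The following is only an illustrative sketch of how a load path could sequence the two halves; the wrapper name and the IRQ-request helper are assumptions, only the two init functions come from this hunk.

/* Illustrative only: sequence the split init around IRQ setup.
 * bnx2x_setup_irqs() stands in for whatever requests the IRQs and is
 * an assumption, not part of this hunk.
 */
static int bnx2x_example_load(struct bnx2x *bp, u32 load_code)
{
	int rc;

	/* rx/tx rings plus PF-only status/slowpath blocks; VFs return early */
	bnx2x_pre_irq_nic_init(bp);

	rc = bnx2x_setup_irqs(bp);	/* assumed helper */
	if (rc)
		return rc;

	/* EQ ring, internal memory and PF init once IRQs are in place */
	bnx2x_post_irq_nic_init(bp, load_code);
	return 0;
}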
u8 wol_cap;
bool wol;
u32 uc_macs; /* Count of secondary UC MAC programmed */
+ u16 asic_rev;
+ u16 qnq_vid;
u32 msg_enable;
int be_get_temp_freq;
u16 max_mcast_mac;
skb->vlan_tci = 0;
}
- skb = __vlan_put_tag(skb, vlan_tag);
+ if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
+ if (!vlan_tag)
+ vlan_tag = adapter->pvid;
+ if (skip_hw_vlan)
+ *skip_hw_vlan = true;
+ }
+
+ if (vlan_tag) {
- skb = __vlan_put_tag(skb, vlan_tag);
++ skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ if (unlikely(!skb))
+ return skb;
+
+ skb->vlan_tci = 0;
+ }
+
+ /* Insert the outer VLAN, if any */
+ if (adapter->qnq_vid) {
+ vlan_tag = adapter->qnq_vid;
++ skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ if (unlikely(!skb))
+ return skb;
+ if (skip_hw_vlan)
+ *skip_hw_vlan = true;
+ }
+
return skb;
}
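
For reference, here is a self-contained sketch of the double-tag pattern used above, based on the three-argument __vlan_put_tag() as called in this hunk (the helper frees the skb and returns NULL on failure). The function name and parameters are illustrative, not part of the patch.

#include <linux/if_vlan.h>

/* Illustrative double 802.1Q tagging, mirroring the hunk's calls:
 * put the inner tag into the payload, clear the accelerated tci,
 * then optionally stack an outer tag (e.g. the QnQ/pvid case above).
 */
static struct sk_buff *example_put_double_tag(struct sk_buff *skb,
					      u16 inner_vid, u16 outer_vid)
{
	if (inner_vid) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), inner_vid);
		if (unlikely(!skb))
			return NULL;	/* helper freed the skb on error */
		skb->vlan_tci = 0;	/* tag now lives in the packet data */
	}

	if (outer_vid) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), outer_vid);
		if (unlikely(!skb))
			return NULL;
	}
	return skb;
}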
if (is4addr)
hdr_size = sizeof(*unicast_4addr_packet);
- if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
+ /* function returns -EREMOTE for promiscuous packets */
+ check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);
+
+ /* Even though the packet is not for us, we might save it to use for
+ * decoding a later received coded packet
+ */
+ if (check == -EREMOTE)
+ batadv_nc_skb_store_sniffed_unicast(bat_priv, skb);
+
+ if (check < 0)
return NET_RX_DROP;
-
- if (!batadv_check_unicast_ttvn(bat_priv, skb))
+ if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
return NET_RX_DROP;
/* packet for me */
}
}
+/* Packet is added to VJ-style prequeue for processing in process
+ * context, if a reader task is waiting. Apparently, this exciting
+ * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
+ * failed somewhere. Latency? Burstiness? Well, at least now we will
+ * see, why it failed. 8)8) --ANK
+ */
+bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (sysctl_tcp_low_latency || !tp->ucopy.task)
+ return false;
+
+ if (skb->len <= tcp_hdrlen(skb) &&
+ skb_queue_len(&tp->ucopy.prequeue) == 0)
+ return false;
+
++ skb_dst_force(skb);
+ __skb_queue_tail(&tp->ucopy.prequeue, skb);
+ tp->ucopy.memory += skb->truesize;
+ if (tp->ucopy.memory > sk->sk_rcvbuf) {
+ struct sk_buff *skb1;
+
+ BUG_ON(sock_owned_by_user(sk));
+
+ while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
+ sk_backlog_rcv(sk, skb1);
+ NET_INC_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPPREQUEUEDROPPED);
+ }
+
+ tp->ucopy.memory = 0;
+ } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
+ wake_up_interruptible_sync_poll(sk_sleep(sk),
+ POLLIN | POLLRDNORM | POLLRDBAND);
+ if (!inet_csk_ack_scheduled(sk))
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+ (3 * tcp_rto_min(sk)) / 4,
+ TCP_RTO_MAX);
+ }
+ return true;
+}
+EXPORT_SYMBOL(tcp_prequeue);
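
As a caller-side illustration (not part of this patch), the softirq receive path is expected to try tcp_prequeue() only while the socket is not owned by a user context, falling back to direct processing otherwise. The sketch below follows the tcp_v4_rcv() pattern; the function name and the error handling are simplified assumptions.

/* Illustrative dispatch, modeled on tcp_v4_rcv(): prequeue when possible,
 * process directly when prequeueing is declined, backlog when the socket
 * is owned by a user-space caller.
 */
static int example_tcp_rcv_dispatch(struct sock *sk, struct sk_buff *skb)
{
	int ret = 0;

	bh_lock_sock_nested(sk);
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf))) {
		ret = -ENOBUFS;		/* backlog full: caller would drop */
	}
	bh_unlock_sock(sk);

	return ret;
}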
+
/*
* From tcp_input.c
*/
#define mac802154_to_priv(_hw) container_of(_hw, struct mac802154_priv, hw)
- #define MAC802154_CHAN_NONE (~(u8)0) /* No channel is assigned */
-#define MAC802154_MAX_XMIT_ATTEMPTS 3
-
+ #define MAC802154_CHAN_NONE 0xff /* No channel is assigned */
extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced;
extern struct ieee802154_mlme_ops mac802154_mlme_wpan;