struct bnx2x_phy *phy = &params->phy[INT_PHY];
if (vars->line_speed == SPEED_AUTO_NEG &&
(CHIP_IS_E1x(bp) ||
- CHIP_IS_E2(bp))) {
+ CHIP_IS_E2(bp)))
bnx2x_set_parallel_detection(phy, params);
- if (params->phy[INT_PHY].config_init)
- params->phy[INT_PHY].config_init(phy,
- params,
- vars);
- }
+ if (params->phy[INT_PHY].config_init)
+ params->phy[INT_PHY].config_init(phy, params, vars);
}
/* Init external phy*/
static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
u8 vf_qid, bool set) {return 0; }
+static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *params) {return 0; }
static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; }
static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; }
static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; }
tristate
depends on PCI && X86
default n
-
-config MLX5_DEBUG
- bool "Verbose debugging output" if (MLX5_CORE && EXPERT)
- depends on MLX5_CORE
- default y
- ---help---
- This option causes debugging code to be compiled into the
- mlx5_core driver. The output can be turned on via the
- debug_mask module parameter (which can also be set after
- the driver is loaded through sysfs).
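/*
 * Hedged illustration, not mlx5_core's actual source: a "debug_mask" knob
 * that is both a module parameter and adjustable after load through sysfs
 * (/sys/module/<driver>/parameters/debug_mask) is typically declared
 * along these lines.
 */
static unsigned int debug_mask;
module_param(debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Bitmask enabling verbose debug output");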
#endif
}
+/**
+ * netdev_sent_queue - report the number of bytes queued to hardware
+ * @dev: network device
+ * @bytes: number of bytes queued to the hardware device queue
+ *
+ * Report the number of bytes queued for sending/completion to the network
+ * device hardware queue. @bytes should be a good approximation and should
+ * exactly match netdev_completed_queue() @bytes
+ */
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
#endif
}
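/*
 * Illustrative sketch, not part of this patch: a hypothetical driver's
 * ndo_start_xmit accounts each frame with netdev_sent_queue() as it is
 * handed to hardware so BQL can track in-flight bytes.  struct foo_priv
 * and foo_hw_queue_tx() are made-up names.
 */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);
	unsigned int len = skb->len;

	/* Post the frame to the (hypothetical) hardware TX ring. */
	foo_hw_queue_tx(priv, skb);

	/* Tell BQL how many bytes now await a completion report. */
	netdev_sent_queue(dev, len);

	return NETDEV_TX_OK;
}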
+/**
+ * netdev_completed_queue - report bytes and packets completed by device
+ * @dev: network device
+ * @pkts: actual number of packets sent over the medium
+ * @bytes: actual number of bytes sent over the medium
+ *
+ * Report the number of bytes and packets transmitted by the network device
+ * hardware queue over the physical medium; @bytes must exactly match the
+ * @bytes amount passed to netdev_sent_queue()
+ */
static inline void netdev_completed_queue(struct net_device *dev,
unsigned int pkts, unsigned int bytes)
{
#endif
}
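/*
 * Illustrative sketch, not part of this patch: the matching TX-completion
 * path (e.g. driven by an interrupt or NAPI poll) reports the finished
 * work back with netdev_completed_queue().  foo_reap_tx_ring() is a
 * made-up helper returning how many packets and bytes the hardware
 * completed; the byte count must mirror what was passed to
 * netdev_sent_queue().
 */
static void foo_tx_complete(struct net_device *dev)
{
	unsigned int pkts = 0, bytes = 0;

	/* Hypothetical helper walking the TX ring for finished descriptors. */
	foo_reap_tx_ring(dev, &pkts, &bytes);

	netdev_completed_queue(dev, pkts, bytes);
}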
+/**
+ * netdev_reset_queue - reset the packets and bytes count of a network device
+ * @dev_queue: network device
+ *
+ * Reset the bytes and packet count of a network device and clear the
+ * software flow control OFF bit for this network device
+ */
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
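/*
 * Illustrative sketch, not part of this patch: a driver typically calls
 * netdev_reset_queue() when it tears down or re-initializes its TX ring
 * (e.g. in ndo_stop), so stale BQL accounting cannot leave the queue
 * stalled after the next open.  foo_free_tx_ring() is a made-up helper.
 */
static int foo_stop(struct net_device *dev)
{
	foo_free_tx_ring(dev);

	/* Drop any in-flight byte/packet accounting along with the ring. */
	netdev_reset_queue(dev);

	return 0;
}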
/* If reordering is high then always grow cwnd whenever data is
* delivered regardless of its ordering. Otherwise stay conservative
- * and only grow cwnd on in-order delivery in Open state, and retain
- * cwnd in Disordered state (RFC5681). A stretched ACK with
+ * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
* new SACK or ECE mark may first advance cwnd here and later reduce
* cwnd in tcp_fastretrans_alert() based on more states.
*/
if (tcp_sk(sk)->reordering > sysctl_tcp_reordering)
return flag & FLAG_FORWARD_PROGRESS;
- return inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
- flag & FLAG_DATA_ACKED;
+ return flag & FLAG_DATA_ACKED;
}
/* Check that window update is acceptable.
if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
} else {
+ tcp_grow_window(sk, skb);
kfree_skb_partial(skb, fragstolen);
skb = NULL;
}
if (tcp_is_sack(tp))
tcp_sack_new_ofo_skb(sk, seq, end_seq);
end:
- if (skb)
+ if (skb) {
+ tcp_grow_window(sk, skb);
skb_set_owner_r(skb, sk);
+ }
}
static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);
+static bool netlink_filter_tap(const struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+ bool pass = false;
+
+ /* We take the more conservative approach and
+ * whitelist socket protocols that may pass.
+ */
+ switch (sk->sk_protocol) {
+ case NETLINK_ROUTE:
+ case NETLINK_USERSOCK:
+ case NETLINK_SOCK_DIAG:
+ case NETLINK_NFLOG:
+ case NETLINK_XFRM:
+ case NETLINK_FIB_LOOKUP:
+ case NETLINK_NETFILTER:
+ case NETLINK_GENERIC:
+ pass = true;
+ break;
+ }
+
+ return pass;
+}
+
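/*
 * Illustrative sketch, not part of this patch: a consumer of the tap
 * infrastructure (the in-tree nlmon driver is the usual example) registers
 * its netdevice with netlink_add_tap() so that traffic accepted by
 * netlink_filter_tap() above is cloned to it, and unregisters with
 * netlink_remove_tap() on teardown.  foo_tap and foo_dev are made-up names.
 */
static struct netlink_tap foo_tap;

static int foo_register_tap(struct net_device *foo_dev)
{
	foo_tap.dev = foo_dev;
	foo_tap.module = THIS_MODULE;

	return netlink_add_tap(&foo_tap);
}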
static int __netlink_deliver_tap_skb(struct sk_buff *skb,
struct net_device *dev)
{
struct sk_buff *nskb;
+ struct sock *sk = skb->sk;
int ret = -ENOMEM;
dev_hold(dev);
nskb = skb_clone(skb, GFP_ATOMIC);
if (nskb) {
nskb->dev = dev;
+ nskb->protocol = htons((u16) sk->sk_protocol);
+
ret = dev_queue_xmit(nskb);
if (unlikely(ret > 0))
ret = net_xmit_errno(ret);
int ret;
struct netlink_tap *tmp;
+ if (!netlink_filter_tap(skb))
+ return;
+
list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
ret = __netlink_deliver_tap_skb(skb, tmp->dev);
if (unlikely(ret))