skb = tx_buf->skb;
#ifdef BCM_TSO
/* partial BD completions possible with TSO packets */
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
u16 last_idx, last_ring_idx;
last_idx = sw_cons +
struct cpl_tx_pkt *cpl;
#ifdef NETIF_F_TSO
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
int eth_type;
struct cpl_tx_pkt_lso *hdr;
uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
int err;
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err)
* tso gets written back prematurely before the data is fully
* DMA'd to the controller */
if (!skb->data_len && tx_ring->last_tx_tso &&
- !skb_shinfo(skb)->gso_size) {
+ !skb_is_gso(skb)) {
tx_ring->last_tx_tso = 0;
size -= 4;
}
#ifdef NETIF_F_TSO
/* Controller Erratum workaround */
- if (!skb->data_len && tx_ring->last_tx_tso &&
- !skb_shinfo(skb)->gso_size)
+ if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
count++;
#endif
np->tx_skbuff[nr] = skb;
#ifdef NETIF_F_TSO
- if (skb_shinfo(skb)->gso_size)
+ if (skb_is_gso(skb))
tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
else
#endif
uint16_t ipcse, tucse, mss;
int err;
- if(likely(skb_shinfo(skb)->gso_size)) {
+ if (likely(skb_is_gso(skb))) {
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err)
#endif
#ifdef LOOPBACK_TSO
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
BUG_ON(skb->protocol != htons(ETH_P_IP));
BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
}
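
Aside, not part of the patch: loopback feeds its own transmissions straight back into netif_rx(), so under LOOPBACK_TSO an oversized GSO skb must be segmented in software before delivery. The two BUG_ON()s above record the invariant that GSO skbs reaching this point are always IPv4 TCP.
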
idx = (idx + 1) & tx->mask;
} while (idx != last_idx);
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
printk(KERN_ERR
"myri10ge: %s: TSO but wanted to linearize?!?!?\n",
mgp->dev->name);
count = sizeof(dma_addr_t) / sizeof(u32);
count += skb_shinfo(skb)->nr_frags * count;
- if (skb_shinfo(skb)->gso_size)
+ if (skb_is_gso(skb))
++count;
if (skb->ip_summed == CHECKSUM_HW)
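
A quick worked example of the descriptor arithmetic above (illustrative, assuming a 64-bit dma_addr_t, so sizeof(dma_addr_t) / sizeof(u32) == 2): a linear skb with two page fragments needs 2 + 2 * 2 = 6 list elements, and a GSO skb reserves one more, as the ++count shows.
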
* If problems develop with TSO, check this first.
*/
numDesc = skb_shinfo(skb)->nr_frags + 1;
- if(skb_tso_size(skb))
+ if (skb_is_gso(skb))
numDesc++;
/* When checking for free space in the ring, we need to also
TYPHOON_TX_PF_VLAN_TAG_SHIFT);
}
- if(skb_tso_size(skb)) {
+ if (skb_is_gso(skb)) {
first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
first_txd->numDesc++;
queue = card->qdio.out_qs
[qeth_get_priority_queue(card, skb, ipv, cast_type)];
- if (skb_shinfo(skb)->gso_size)
+ if (skb_is_gso(skb))
large_send = card->options.large_send;
/* are we able to do TSO? If so, prepare and send it from here */
static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
- return net_gso_ok(features, skb_shinfo(skb)->gso_size ?
+ return net_gso_ok(features, skb_is_gso(skb) ?
skb_shinfo(skb)->gso_type : 0);
}
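
For reference (recalled, not part of this patch), net_gso_ok() at the time mapped the gso_type into the NETIF_F_GSO feature bits roughly as follows, which is why skb_gso_ok() passing 0 for a non-GSO skb makes the check trivially succeed:

	static inline int net_gso_ok(int features, int gso_type)
	{
		int feature = gso_type << NETIF_F_GSO_SHIFT;

		return (features & feature) == feature;
	}
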
{ }
#endif
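+/* Non-zero when the skb carries GSO data, i.e. gso_size is set. */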
+static inline int skb_is_gso(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_size;
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
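
The helper is a plain predicate over gso_size, so every open-coded test converts one-for-one. A minimal before/after sketch (illustrative only; mss is just a local here):

	/* before */
	if (skb_shinfo(skb)->gso_size)
		mss = skb_shinfo(skb)->gso_size;

	/* after */
	if (skb_is_gso(skb))
		mss = skb_shinfo(skb)->gso_size;
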
int br_dev_queue_push_xmit(struct sk_buff *skb)
{
/* drop mtu oversized packets except gso */
- if (packet_length(skb) > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
+ if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
kfree_skb(skb);
else {
#ifdef CONFIG_BRIDGE_NETFILTER
{
if (skb->protocol == htons(ETH_P_IP) &&
skb->len > skb->dev->mtu &&
- !skb_shinfo(skb)->gso_size)
+ !skb_is_gso(skb))
return ip_fragment(skb, br_dev_queue_push_xmit);
else
return br_dev_queue_push_xmit(skb);
return dst_output(skb);
}
#endif
- if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size)
+ if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
return ip_fragment(skb, ip_finish_output2);
else
return ip_finish_output2(skb);
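
The reasoning in all of these output-path hunks is the same: a GSO skb may legitimately exceed the MTU because the core will run it through skb_gso_segment() at transmit time, so fragmenting it here would be redundant. Roughly the shape of that transmit-time check (recalled, not part of this patch):

	if (netif_needs_gso(dev, skb)) {
		if (unlikely(dev_gso_segment(skb)))
			goto out_kfree_skb;
	}
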
while (size > 0) {
int i;
- if (skb_shinfo(skb)->gso_size)
+ if (skb_is_gso(skb))
len = size;
else {
}
#endif
- if (!skb_shinfo(skb)->gso_size)
+ if (!skb_is_gso(skb))
return xfrm4_output_finish2(skb);
skb->protocol = htons(ETH_P_IP);
int ip6_output(struct sk_buff *skb)
{
- if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) ||
+ if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
dst_allfrag(skb->dst))
return ip6_fragment(skb, ip6_output2);
else
skb->priority = sk->sk_priority;
mtu = dst_mtu(dst);
- if ((skb->len <= mtu) || ipfragok || skb_shinfo(skb)->gso_size) {
+ if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
dst_output);
{
struct sk_buff *segs;
- if (!skb_shinfo(skb)->gso_size)
+ if (!skb_is_gso(skb))
return xfrm6_output_finish2(skb);
skb->protocol = htons(ETH_P_IP);
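
In both xfrm output paths the non-GSO case short-circuits to the ordinary finish function, while a GSO skb falls through to be split so each segment can be transformed individually. The fall-through continues roughly like this (recalled, not part of this patch):

	segs = skb_gso_segment(skb, 0);
	kfree_skb(skb);
	if (unlikely(IS_ERR(segs)))
		return PTR_ERR(segs);
	/* ...then each segment is fed to xfrm6_output_finish2() in turn */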