tso->tsopl |= (iph->ihl &
CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT;
- tso->tsopl |= ((skb->h.th->doff << 2) &
+ tso->tsopl |= (tcp_hdrlen(skb) &
TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT;
tso->tsopl |= (skb_shinfo(skb)->gso_size &
TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT;
if (tcp_seg) {
/* TSO/GSO */
- proto_hdr_len = (skb_transport_offset(skb) +
- (skb->h.th->doff << 2));
+ proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
buffer_info->length = proto_hdr_len;
page = virt_to_page(skb->data);
offset = (unsigned long)skb->data & ~PAGE_MASK;
if (mss) {
if (skb->protocol == htons(ETH_P_IP)) {
proto_hdr_len = (skb_transport_offset(skb) +
- (skb->h.th->doff << 2));
+ tcp_hdrlen(skb));
if (unlikely(proto_hdr_len > len)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
tcp_opt_len = 0;
- if (skb->h.th->doff > 5) {
- tcp_opt_len = (skb->h.th->doff - 5) << 2;
- }
+ if (skb->h.th->doff > 5)
+ tcp_opt_len = tcp_optlen(skb);
+
ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
iph = ip_hdr(skb);
return err;
}
- hdr_len = (skb_transport_offset(skb) + (skb->h.th->doff << 2));
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
* points to just header, pull a few bytes of payload from
* frags into skb->data */
- hdr_len = (skb_transport_offset(skb) + (skb->h.th->doff << 2));
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
switch (adapter->hw.mac_type) {
unsigned int pull_size;
/* copy only eth/ip/tcp headers to immediate data and
* the rest of skb->data to sg1entry
*/
- headersize = ETH_HLEN + ip_hdrlen(skb) + (skb->h.th->doff * 4);
+ headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
skb_data_size = skb->len - skb->data_len;
return err;
}
- hdr_len = (skb_transport_offset(skb) + (skb->h.th->doff << 2));
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
iph = ip_hdr(skb);
iph->tot_len = 0;
* send loop that we are still in the
* header portion of the TSO packet.
* TSO header must be at most 134 bytes long */
- cum_len = -(skb_transport_offset(skb) +
- (skb->h.th->doff << 2));
+ cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));
/* for TSO, pseudo_hdr_offset holds mss.
* The firmware figures out where to put
{
if (desc->mss) {
desc->total_hdr_length = (sizeof(struct ethhdr) +
- ip_hdrlen(skb) +
- skb->h.th->doff * 4);
+ ip_hdrlen(skb) + tcp_hdrlen(skb));
netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO);
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
if (skb_shinfo(skb)->gso_size > 0) {
no_of_desc++;
- if ((ip_hdrlen(skb) + skb->h.th->doff * 4 +
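+ /* headers too large to fit in one command descriptor: reserve another */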
+ if ((ip_hdrlen(skb) + tcp_hdrlen(skb) +
sizeof(struct ethhdr)) >
(sizeof(struct cmd_desc_type0) - 2)) {
no_of_desc++;
/* Check for TCP Segmentation Offload */
mss = skb_shinfo(skb)->gso_size;
if (mss != 0) {
- mss += ((skb->h.th->doff - 5) * 4); /* TCP options */
+ mss += tcp_optlen(skb); /* TCP options */
mss += ip_hdrlen(skb) + sizeof(struct tcphdr);
mss += ETH_HLEN;
else {
struct iphdr *iph = ip_hdr(skb);
- tcp_opt_len = ((skb->h.th->doff - 5) * 4);
+ tcp_opt_len = tcp_optlen(skb);
ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
iph->check = 0;
goto out_unlock;
}
- tcp_opt_len = ((skb->h.th->doff - 5) * 4);
+ tcp_opt_len = tcp_optlen(skb);
ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
hdr_len = ip_tcp_len + tcp_opt_len;
skb_network_header(skb),
ip_hdrlen(skb),
skb->h.raw,
- skb->h.th->doff * 4);
+ tcp_hdrlen(skb));
else
eddp = qeth_eddp_create_eddp_data(qhdr,
skb_network_header(skb),
sizeof(struct ipv6hdr),
skb->h.raw,
- skb->h.th->doff * 4);
+ tcp_hdrlen(skb));
if (eddp == NULL) {
QETH_DBF_TEXT(trace, 2, "eddpfcnm");
ctx = qeth_eddp_create_context_generic(card, skb,
(sizeof(struct qeth_hdr) +
ip_hdrlen(skb) +
- skb->h.th->doff * 4));
+ tcp_hdrlen(skb)));
else if (skb->protocol == htons(ETH_P_IPV6))
ctx = qeth_eddp_create_context_generic(card, skb,
sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
- skb->h.th->doff*4);
+ tcp_hdrlen(skb));
else
QETH_DBF_TEXT(trace, 2, "cetcpinv");
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
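+/* doff counts 32-bit words, so the TCP header is doff * 4 bytes long */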
+static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
+{
+ return skb->h.th->doff * 4;
+}
+
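+/* a bare TCP header is 5 words (20 bytes); anything beyond that is options */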
+static inline unsigned int tcp_optlen(const struct sk_buff *skb)
+{
+ return (skb->h.th->doff - 5) * 4;
+}
+
/* This defines a selective acknowledgement block. */
struct tcp_sack_block_wire {
__be32 start_seq;
return 0;
}
- if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
+ if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
goto csum_err;
if (sk->sk_state == TCP_LISTEN) {
return 0;
}
- if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
+ if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
goto csum_err;
if (sk->sk_state == TCP_LISTEN) {