le = get_tx_le(sky2);
le->addr = 0;
le->opcode = OP_ADDR64 | HW_OWNER;
- sky2->tx_addr64 = 0;
}
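
The change drops the driver's per-port cache of the last high 32 bits programmed into the chip (tx_addr64/rx_addr64). Instead of emitting an OP_ADDR64 list element only when the high word changes, the driver now emits one for every mapping whenever dma_addr_t is wider than u32. Because the sizeof comparison is a compile-time constant, the branch and the extra element vanish entirely on 32-bit builds. Below is a standalone sketch of the address split, with upper_32_bits()/lower_32_bits() written out the way the kernel defines them (the double 16-bit shift avoids an undefined shift-by-32 when the type is only 32 bits wide); the dma_addr_t typedef and the printed layout are illustrative assumptions, not driver code:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;	/* assumption: a 64-bit DMA configuration */

#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((uint32_t)(n))

int main(void)
{
	dma_addr_t mapping = 0x00000001fffff000ULL;

	/* Compile-time constant: with a 32-bit dma_addr_t this whole
	 * block is dead code and the compiler removes it. */
	if (sizeof(dma_addr_t) > sizeof(uint32_t))
		printf("OP_ADDR64 element, addr = 0x%08x\n",
		       (unsigned)upper_32_bits(mapping));

	printf("buffer element,    addr = 0x%08x\n",
	       (unsigned)lower_32_bits(mapping));
	return 0;
}
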
static void sky2_rx_add(struct sky2_port *sky2, u8 op,
			dma_addr_t map, unsigned len)
{
struct sky2_rx_le *le;
- u32 hi = upper_32_bits(map);
- if (sky2->rx_addr64 != hi) {
+ if (sizeof(dma_addr_t) > sizeof(u32)) {
le = sky2_next_rx(sky2);
- le->addr = cpu_to_le32(hi);
+ le->addr = cpu_to_le32(upper_32_bits(map));
le->opcode = OP_ADDR64 | HW_OWNER;
- sky2->rx_addr64 = upper_32_bits(map + len);
}
le = sky2_next_rx(sky2);
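
Taken together, the receive path now writes a fixed two-element pattern per buffer on 64-bit builds: first the OP_ADDR64 element carrying the high word, then the buffer element carrying the low word, length and opcode. The following is a minimal userspace simulation of that pattern; the struct layout, the opcode values and the next_rx() helper are simplified stand-ins for struct sky2_rx_le and sky2_next_rx(), not the driver's definitions:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;	/* assumption: 64-bit DMA build */

#define OP_ADDR64 0x21		/* illustrative values, not the chip's */
#define OP_PACKET 0x41
#define HW_OWNER  0x80

struct rx_le {			/* simplified stand-in for struct sky2_rx_le */
	uint32_t addr;
	uint16_t length;
	uint8_t  ctrl, opcode;
};

static struct rx_le ring[16];
static unsigned rx_put;

static struct rx_le *next_rx(void)	/* stand-in for sky2_next_rx() */
{
	return &ring[rx_put++ % 16];
}

static void rx_add(uint8_t op, dma_addr_t map, unsigned len)
{
	struct rx_le *le;

	/* Unconditional on 64-bit: high word first, every time. */
	if (sizeof(dma_addr_t) > sizeof(uint32_t)) {
		le = next_rx();
		le->addr = (uint32_t)(map >> 32);
		le->opcode = OP_ADDR64 | HW_OWNER;
	}

	le = next_rx();
	le->addr = (uint32_t)map;
	le->length = (uint16_t)len;
	le->opcode = op | HW_OWNER;
}

int main(void)
{
	rx_add(OP_PACKET, 0x00000001f0000000ULL, 1536);
	printf("list elements consumed: %u\n", rx_put);	/* 2, not 1 */
	return 0;
}
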
struct tx_ring_info *re;
unsigned i, len;
dma_addr_t mapping;
- u32 addr64;
u16 mss;
u8 ctrl;
len = skb_headlen(skb);
mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
- addr64 = upper_32_bits(mapping);
- /* Send high bits if changed or crosses boundary */
- if (addr64 != sky2->tx_addr64 ||
- upper_32_bits(mapping + len) != sky2->tx_addr64) {
+ /* Send high bits if needed */
+ if (sizeof(dma_addr_t) > sizeof(u32)) {
le = get_tx_le(sky2);
- le->addr = cpu_to_le32(addr64);
+ le->addr = cpu_to_le32(upper_32_bits(mapping));
le->opcode = OP_ADDR64 | HW_OWNER;
- sky2->tx_addr64 = upper_32_bits(mapping + len);
}
/* Check for TCP Segmentation Offload */
mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
frag->size, PCI_DMA_TODEVICE);
- addr64 = upper_32_bits(mapping);
- if (addr64 != sky2->tx_addr64) {
+
+ if (sizeof(dma_addr_t) > sizeof(u32)) {
le = get_tx_le(sky2);
- le->addr = cpu_to_le32(addr64);
+ le->addr = cpu_to_le32(upper_32_bits(mapping));
le->ctrl = 0;
le->opcode = OP_ADDR64 | HW_OWNER;
- sky2->tx_addr64 = addr64;
}
le = get_tx_le(sky2);
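
One consequence worth noting on the transmit side: with the cache gone, every mapped buffer (the linear head and each page fragment) consumes a fixed sizeof(dma_addr_t)/sizeof(u32) list elements, so the worst-case ring budget per packet doubles on 64-bit DMA. Here is a hedged sketch of that accounting; the function name and the TSO/checksum increments are assumptions for illustration, not the driver's exact bookkeeping:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;	/* assumption: 64-bit DMA build */

/* Hypothetical helper: worst-case list elements one packet can need
 * when every mapping is preceded by an OP_ADDR64 element. */
static unsigned tx_le_budget(unsigned nr_frags, int gso, int csum)
{
	unsigned count = (nr_frags + 1)		/* head + page fragments */
		* (sizeof(dma_addr_t) / sizeof(uint32_t));

	if (gso)
		count++;	/* one extra element carrying the MSS */
	if (csum)
		count++;	/* one extra element for checksum offsets */
	return count;
}

int main(void)
{
	printf("3 frags + TSO: %u list elements\n", tx_le_budget(3, 1, 0));
	return 0;
}
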
u16 tx_cons; /* next le to check */
u16 tx_prod; /* next le to use */
u16 tx_next; /* debug only */
- u32 tx_addr64;
+
u16 tx_pending;
u16 tx_last_mss;
u32 tx_tcpsum;
struct rx_ring_info *rx_ring ____cacheline_aligned_in_smp;
struct sky2_rx_le *rx_le;
- u32 rx_addr64;
+
u16 rx_next; /* next re to check */
u16 rx_put; /* next le index to use */
u16 rx_pending;
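
Removing tx_addr64 and rx_addr64 from struct sky2_port also removes a piece of software state that had to stay in sync with whatever high word the hardware last latched, which is why tx_init above seeded the cache with 0 right after writing the initial ADDR64 element. Below is a constructed illustration of the kind of desynchronization such a cache invites if any reset path forgets to reseed it; this is a hypothetical scenario, not a reproduction of a known sky2 bug:

#include <stdint.h>
#include <stdio.h>

static uint32_t cached_hi;	/* stand-in for the removed tx_addr64 */

/* Old-style emission: skip the ADDR64 element whenever the cache
 * claims the hardware already holds the right high word. */
static int old_scheme_writes_addr64(uint64_t map)
{
	uint32_t hi = (uint32_t)(map >> 32);

	if (cached_hi != hi) {
		cached_hi = hi;
		return 1;
	}
	return 0;
}

int main(void)
{
	uint64_t map = 0x0000000180001000ULL;

	printf("first use:   addr64 written = %d\n",
	       old_scheme_writes_addr64(map));

	/* Imagine the ring is reinitialized here and the chip's latched
	 * high word is lost, but cached_hi is not reseeded... */
	printf("after reset: addr64 written = %d  <- stale cache\n",
	       old_scheme_writes_addr64(map));
	return 0;
}
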