txq->size = pp->tx_ring_size;
/* Allocate memory for TX descriptors */
- txq->descs_phys = (u32)txq->descs;
+ txq->descs_phys = (dma_addr_t)txq->descs;
if (txq->descs == NULL)
return -ENOMEM;
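Not part of the patch: a minimal standalone sketch of why the plain (u32) cast is being replaced in the hunk above. On a 64-bit build, casting a pointer straight to u32 truncates the upper address bits and triggers a pointer-to-int-cast warning, while a pointer-sized type (dma_addr_t in U-Boot, uintptr_t in plain C) keeps the full value. demo_dma_addr_t below is a stand-in, not the U-Boot type.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t demo_dma_addr_t;	/* stand-in for U-Boot's dma_addr_t */

int main(void)
{
	int descs[8];
	void *p = descs;

	/* truncating cast: drops the high bits when pointers are 64-bit */
	uint32_t bad = (uint32_t)(uintptr_t)p;

	/* pointer-sized cast: preserves the whole address */
	demo_dma_addr_t good = (demo_dma_addr_t)(uintptr_t)p;

	printf("truncated: 0x%08x  full: 0x%016llx\n",
	       bad, (unsigned long long)good);
	return 0;
}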
/* Get a descriptor for the first part of the packet */
tx_desc = mvneta_txq_next_desc_get(txq);
- tx_desc->buf_phys_addr = (u32)packet;
+ tx_desc->buf_phys_addr = (u32)(uintptr_t)packet;
tx_desc->data_size = length;
- flush_dcache_range((u32)packet,
- (u32)packet + ALIGN(length, PKTALIGN));
+ flush_dcache_range((ulong)packet,
+ (ulong)packet + ALIGN(length, PKTALIGN));
/* First and Last descriptor */
tx_desc->command = MVNETA_TX_L4_CSUM_NOT | MVNETA_TXD_FLZ_DESC;
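A hedged standalone sketch (plain C, not driver code) of the TX-side pattern above: the hardware descriptor field stays 32 bits wide, so the pointer goes through uintptr_t before the explicit truncation, and the cache-flush range is computed with address-sized arithmetic so nothing is lost on a 64-bit build. demo_tx_desc, demo_flush_range() and demo_fill_desc() are invented names, not U-Boot APIs.

#include <stdint.h>
#include <stddef.h>

struct demo_tx_desc {
	uint32_t buf_phys_addr;	/* hardware field is only 32 bits wide */
	uint16_t data_size;
};

/* stand-in for flush_dcache_range(); takes address-sized arguments */
void demo_flush_range(unsigned long start, unsigned long end)
{
	(void)start;
	(void)end;
}

void demo_fill_desc(struct demo_tx_desc *desc, void *packet, size_t len)
{
	/* two-step cast: pointer -> uintptr_t -> u32, truncation is explicit */
	desc->buf_phys_addr = (uint32_t)(uintptr_t)packet;
	desc->data_size = (uint16_t)len;

	/* flush range computed with address-sized arithmetic, no truncation */
	demo_flush_range((unsigned long)packet,
			 (unsigned long)packet + len);
}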
rx_bytes = rx_desc->data_size - 6;
/* give packet to stack - skip the first 2 bytes */
- data = (u8 *)rx_desc->buf_cookie + 2;
+ data = (u8 *)(uintptr_t)rx_desc->buf_cookie + 2;
/*
 * No cache invalidation needed here, since the rx_buffers are
 * located in an uncached memory region
 */
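The RX side goes the other way; a rough sketch under the same assumptions: the 32-bit buf_cookie is widened through uintptr_t before the 2-byte offset is applied, which only works because the RX buffers sit below 4 GiB, as they do in this driver's uncached buffer area. The names below are illustrative only, not the driver's.

#include <stdint.h>

struct demo_rx_desc {
	uint32_t buf_cookie;	/* low 32 bits of the RX buffer address */
	uint16_t data_size;
};

uint8_t *demo_rx_payload(const struct demo_rx_desc *desc)
{
	/* widen the 32-bit cookie to pointer width before the byte offset */
	return (uint8_t *)(uintptr_t)desc->buf_cookie + 2;
}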
if (!buffer_loc.tx_descs) {
/* Align buffer area for descs and rx_buffers to 1MiB */
bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
- mmu_set_region_dcache_behaviour((u32)bd_space, BD_SPACE,
+ mmu_set_region_dcache_behaviour((phys_addr_t)bd_space, BD_SPACE,
DCACHE_OFF);
buffer_loc.tx_descs = (struct mvneta_tx_desc *)bd_space;
buffer_loc.rx_descs = (struct mvneta_rx_desc *)
- ((u32)bd_space +
+ ((phys_addr_t)bd_space +
MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc));
- buffer_loc.rx_buffers = (u32)
+ buffer_loc.rx_buffers = (phys_addr_t)
(bd_space +
MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc) +
MVNETA_MAX_RXD * sizeof(struct mvneta_rx_desc));
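This last hunk carves one memalign()'d region into TX descriptors, RX descriptors and RX buffers; switching the casts from u32 to phys_addr_t keeps that arithmetic address-sized. A loose standalone sketch of the same carve-up, with made-up sizes and names and a plain aligned_alloc() in place of memalign():

#include <stdint.h>
#include <stdlib.h>

#define DEMO_MAX_TXD	16
#define DEMO_MAX_RXD	16
#define DEMO_RX_BUF_SZ	2048

struct demo_tx_desc { uint32_t w[8]; };
struct demo_rx_desc { uint32_t w[8]; };

struct demo_buffer_loc {
	struct demo_tx_desc *tx_descs;
	struct demo_rx_desc *rx_descs;
	uintptr_t rx_buffers;		/* address-sized, not u32 */
};

int demo_layout(struct demo_buffer_loc *loc)
{
	size_t total = DEMO_MAX_TXD * sizeof(struct demo_tx_desc) +
		       DEMO_MAX_RXD * sizeof(struct demo_rx_desc) +
		       DEMO_MAX_RXD * DEMO_RX_BUF_SZ;
	void *bd_space;

	/* round up so the size is a multiple of the requested alignment */
	total = (total + 4095) & ~(size_t)4095;
	bd_space = aligned_alloc(4096, total);
	if (!bd_space)
		return -1;

	/* descriptors first, then the RX buffer pool; offsets stay address-sized */
	loc->tx_descs = (struct demo_tx_desc *)bd_space;
	loc->rx_descs = (struct demo_rx_desc *)
		((uintptr_t)bd_space +
		 DEMO_MAX_TXD * sizeof(struct demo_tx_desc));
	loc->rx_buffers = (uintptr_t)bd_space +
		DEMO_MAX_TXD * sizeof(struct demo_tx_desc) +
		DEMO_MAX_RXD * sizeof(struct demo_rx_desc);
	return 0;
}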