dma_addr_t dmaaddr;
if (tx)
-		dmaaddr = ssb_dma_map_single(ring->dev->dev,
-					     buf, len,
-					     DMA_TO_DEVICE);
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
+					 buf, len,
+					 DMA_TO_DEVICE);
else
-		dmaaddr = ssb_dma_map_single(ring->dev->dev,
-					     buf, len,
-					     DMA_FROM_DEVICE);
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
+					 buf, len,
+					 DMA_FROM_DEVICE);
static void unmap_descbuffer(struct b43legacy_dmaring *ring,
			     dma_addr_t addr,
			     size_t len,
			     int tx)
{
if (tx)
-		ssb_dma_unmap_single(ring->dev->dev,
-				     addr, len,
-				     DMA_TO_DEVICE);
+		dma_unmap_single(ring->dev->dev->dma_dev,
+				 addr, len,
+				 DMA_TO_DEVICE);
else
-		ssb_dma_unmap_single(ring->dev->dev,
-				     addr, len,
-				     DMA_FROM_DEVICE);
+		dma_unmap_single(ring->dev->dev->dma_dev,
+				 addr, len,
+				 DMA_FROM_DEVICE);
}
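The two hunks above are the heart of the conversion: each ssb_dma_* wrapper call becomes the corresponding generic dma_* call, and the ssb device argument (ring->dev->dev) becomes the underlying struct device (ring->dev->dev->dma_dev). A minimal sketch of the resulting streaming-DMA round trip follows; the helper name and the bare "dev" pointer are hypothetical stand-ins, not driver code:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical helper showing the generic map/check/unmap pattern the
 * converted driver relies on. "dev" stands in for
 * ring->dev->dev->dma_dev. */
static int example_tx_once(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))	/* must check before use */
		return -EIO;

	/* ... point the hardware at "addr" and wait for completion ... */

	/* Unmap with the same size and direction used for the mapping. */
	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	return 0;
}

The same substitution with DMA_FROM_DEVICE covers the RX direction, matching the else branches above; the dma_mapping_error() conversion appears later in the patch.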
static inline
void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
B43legacy_WARN_ON(ring->tx);
- ssb_dma_sync_single_for_cpu(ring->dev->dev,
- addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
+ addr, len, DMA_FROM_DEVICE);
}
static inline
void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
B43legacy_WARN_ON(ring->tx);
- ssb_dma_sync_single_for_device(ring->dev->dev,
- addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(ring->dev->dev->dma_dev,
+ addr, len, DMA_FROM_DEVICE);
}
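The two sync helpers convert the same way. Below is a short, hypothetical sketch of the CPU/device ownership handoff they implement for a long-lived streaming RX mapping:

#include <linux/dma-mapping.h>

/* Sketch: a streaming RX buffer belongs to the device by default.
 * Hand it to the CPU before reading, and back afterwards. */
static void example_rx_window(struct device *dev, dma_addr_t addr,
			      size_t len)
{
	/* Transfer ownership to the CPU before touching the buffer... */
	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
	/* ... read the received data here ... */
	/* ...then give the buffer back to the device for the next RX. */
	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
}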
static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
/* GFP flags must match the flags in free_ringmemory()! */
- ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
- B43legacy_DMA_RINGMEMSIZE,
- &(ring->dmabase),
- GFP_KERNEL);
+ ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
+ B43legacy_DMA_RINGMEMSIZE,
+ &(ring->dmabase),
+ GFP_KERNEL);
if (!ring->descbase) {
b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
" failed\n");
static void free_ringmemory(struct b43legacy_dmaring *ring)
{
- ssb_dma_free_consistent(ring->dev->dev, B43legacy_DMA_RINGMEMSIZE,
- ring->descbase, ring->dmabase, GFP_KERNEL);
+ dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
+ ring->descbase, ring->dmabase);
}
/* Reset the RX DMA channel */
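One subtlety in the free_ringmemory() hunk: dma_free_coherent() takes no GFP argument, so the GFP_KERNEL that ssb_dma_free_consistent() required is simply dropped. A sketch of the resulting pairing, with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Sketch of the coherent alloc/free pairing after the conversion. */
static void *example_ring_alloc(struct device *dev, size_t size,
				dma_addr_t *dmabase)
{
	return dma_alloc_coherent(dev, size, dmabase, GFP_KERNEL);
}

static void example_ring_free(struct device *dev, size_t size,
			      void *descbase, dma_addr_t dmabase)
{
	/* No GFP flags here, unlike the old ssb wrapper. */
	dma_free_coherent(dev, size, descbase, dmabase);
}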
size_t buffersize,
bool dma_to_device)
{
- if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
+ if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
return 1;
switch (ring->type) {
goto err_kfree_meta;
/* test for ability to dma to txhdr_cache */
-	dma_test = ssb_dma_map_single(dev->dev, ring->txhdr_cache,
-				      sizeof(struct b43legacy_txhdr_fw3),
-				      DMA_TO_DEVICE);
+	dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
+				  sizeof(struct b43legacy_txhdr_fw3),
+				  DMA_TO_DEVICE);
if (!ring->txhdr_cache)
goto err_kfree_meta;
-		dma_test = ssb_dma_map_single(dev->dev,
-					      ring->txhdr_cache,
-					      sizeof(struct b43legacy_txhdr_fw3),
-					      DMA_TO_DEVICE);
+		dma_test = dma_map_single(dev->dev->dma_dev,
+					  ring->txhdr_cache,
+					  sizeof(struct b43legacy_txhdr_fw3),
+					  DMA_TO_DEVICE);
goto err_kfree_txhdr_cache;
}
- ssb_dma_unmap_single(dev->dev, dma_test,
- sizeof(struct b43legacy_txhdr_fw3),
- DMA_TO_DEVICE);
+ dma_unmap_single(dev->dev->dma_dev, dma_test,
+ sizeof(struct b43legacy_txhdr_fw3),
+ DMA_TO_DEVICE);
}
ring->nr_slots = nr_slots;
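The txhdr_cache hunks implement a probe-and-fallback: map the freshly allocated cache once, and if the device cannot address it, reallocate from the low GFP_DMA zone and probe again (the driver's own b43legacy_dma_mapping_error() additionally checks the address against the ring's addressing width). A condensed, hypothetical sketch of the idea using the plain generic check:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Sketch: allocate a buffer the device can actually DMA to, retrying
 * once from the GFP_DMA zone. All names here are hypothetical. */
static void *example_alloc_dmaable(struct device *dev, size_t size)
{
	void *buf = kzalloc(size, GFP_KERNEL);
	dma_addr_t probe;

	if (!buf)
		return NULL;
	probe = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, probe)) {
		kfree(buf);
		buf = kzalloc(size, GFP_KERNEL | GFP_DMA);
		if (!buf)
			return NULL;
		probe = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, probe)) {
			kfree(buf);
			return NULL;
		}
	}
	/* The mapping was only a test; undo it before real use. */
	dma_unmap_single(dev, probe, size, DMA_TO_DEVICE);
	return buf;
}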
/* Try to set the DMA mask. If it fails, try falling back to a
* lower mask, as we can always also support a lower one. */
while (1) {
- err = ssb_dma_set_mask(dev->dev, mask);
- if (!err)
- break;
+ err = dma_set_mask(dev->dev->dma_dev, mask);
+ if (!err) {
+ err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
+ if (!err)
+ break;
+ }
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_BIT_MASK(32);
fallback = 1;
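The final hunk is more than a rename: the generic API keeps separate masks for streaming and coherent allocations, so the replacement for ssb_dma_set_mask() has to set both before a width is usable, which is why the converted loop gains the dma_set_coherent_mask() call. A sketch of the fallback loop's core step (later kernels fold the pair into dma_set_mask_and_coherent()); names and the condensed error handling are assumptions:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Sketch of the mask-fallback logic above: try 64-bit, fall back to
 * 32-bit, give up otherwise. */
static int example_pick_dma_mask(struct device *dev)
{
	u64 mask = DMA_BIT_MASK(64);

	/* dma_set_mask()/dma_set_coherent_mask() return 0 on success. */
	while (dma_set_mask(dev, mask) || dma_set_coherent_mask(dev, mask)) {
		if (mask != DMA_BIT_MASK(64))
			return -EOPNOTSUPP;	/* no supported mask left */
		mask = DMA_BIT_MASK(32);	/* the "fallback" above */
	}
	return 0;
}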