* of available pages for the requested size.
*/
static int
-ccio_alloc_range(struct ioc *ioc, size_t size)
+ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
unsigned int pages_needed = size >> IOVP_SHIFT;
unsigned int res_idx;
[...]
ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
- idx = ccio_alloc_range(ioc, size);
+ idx = ccio_alloc_range(ioc, dev, size);
iovp = (dma_addr_t)MKIOVP(idx);
pdir_start = &(ioc->pdir_base[idx]);
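The point of threading dev down into the allocator: with a struct device in hand, the range allocator can consult per-device DMA limits while it scans the resource map. A minimal sketch of that use, assuming the goal is to respect the device's DMA segment boundary (dma_get_seg_boundary() is the generic dma-mapping helper; boundary_size is a local introduced here for illustration, and the bitmap scan itself is elided):

static int
ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
	unsigned int res_idx;
	unsigned long boundary_size;

	/* Express the device's segment boundary in IOVP-sized pages
	 * so the scan can reject ranges that would cross it. */
	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      1ULL << IOVP_SHIFT) >> IOVP_SHIFT;

	/* ... find pages_needed consecutive free pages in the ioc's
	 * resource bitmap, never spanning a multiple of boundary_size,
	 * and store the starting index in res_idx ... */
	return res_idx;
}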
[...]
static inline unsigned int
iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
- struct scatterlist *startsg, int nents,
- int (*iommu_alloc_range)(struct ioc *, size_t))
+ struct scatterlist *startsg, int nents,
+ int (*iommu_alloc_range)(struct ioc *, struct device *, size_t))
{
struct scatterlist *contig_sg; /* contig chunk head */
unsigned long dma_offset, dma_len; /* start/len of DMA stream */
[...]
dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
sg_dma_address(contig_sg) =
PIDE_FLAG
- | (iommu_alloc_range(ioc, dma_len) << IOVP_SHIFT)
+ | (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT)
| dma_offset;
n_mappings++;
}
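With the function-pointer type widened to take a struct device *, each map_sg implementation simply forwards its own allocator and its device. A hedged sketch of what a ccio_map_sg() call site would look like after this change (the coalesced variable and surrounding locking are illustrative context, not part of the hunks above):

	/* The device pointer now rides along to whichever range
	 * allocator this IOMMU flavor registered. */
	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents,
					  ccio_alloc_range);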
[...]
* resource bit map.
*/
static int
-sba_alloc_range(struct ioc *ioc, size_t size)
+sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
unsigned int pages_needed = size >> IOVP_SHIFT;
[...]
#ifdef SBA_COLLECT_STATS
ioc->msingle_calls++;
ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
- pide = sba_alloc_range(ioc, size);
+ pide = sba_alloc_range(ioc, dev, size);
iovp = (dma_addr_t) pide << IOVP_SHIFT;
DBG_RUN("%s() 0x%p -> 0x%lx\n",