@@ ... @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 			memset(buf->u.page_list[i].buf, 0, PAGE_SIZE);
 		}
+
+		if (BITS_PER_LONG == 64) {
+			struct page **pages;
+			pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
+			if (!pages)
+				goto err_free;
+			for (i = 0; i < buf->nbufs; ++i)
+				pages[i] = virt_to_page(buf->u.page_list[i].buf);
+			buf->u.direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
+			kfree(pages);
+			if (!buf->u.direct.buf)
+				goto err_free;
+		}
 	}
 
 	return 0;
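
The added block runs only on 64-bit kernels, where kernel address space is
plentiful: it collects the struct page behind each per-page DMA chunk and
vmap()s them into one contiguous kernel virtual range, published through
u.direct.buf. A minimal standalone sketch of the same pattern follows
(hypothetical helper name and parameters, not part of this patch; it assumes
each chunk is a page-sized lowmem kernel virtual address, as the
dma_alloc_coherent() chunks above are):

	#include <linux/mm.h>
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	/* Hypothetical helper: alias nbufs page-sized chunks into one
	 * contiguous kernel virtual range. */
	static void *map_chunks_contig(void **chunks, int nbufs)
	{
		struct page **pages;
		void *vaddr;
		int i;

		pages = kmalloc(sizeof *pages * nbufs, GFP_KERNEL);
		if (!pages)
			return NULL;

		for (i = 0; i < nbufs; ++i)
			pages[i] = virt_to_page(chunks[i]);

		/* VM_MAP + PAGE_KERNEL gives an ordinary cached mapping. */
		vaddr = vmap(pages, nbufs, VM_MAP, PAGE_KERNEL);

		/* vmap() only reads the array during the call. */
		kfree(pages);

		return vaddr;	/* NULL on failure; undo with vunmap() */
	}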
@@ ... @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
 		dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf,
 				  buf->u.direct.map);
 	else {
+		if (BITS_PER_LONG == 64)
+			vunmap(buf->u.direct.buf);
+
 		for (i = 0; i < buf->nbufs; ++i)
 			if (buf->u.page_list[i].buf)
 				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
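
Teardown must mirror setup in reverse order: the vmap() alias is removed
before the backing pages are handed back, because vunmap() only tears down
the page tables and leaves the pages themselves alive. Continuing the
hypothetical sketch above (assuming the chunks came from __get_free_page()):

	/* Hypothetical counterpart to map_chunks_contig(). */
	static void free_chunks_contig(void *vaddr, void **chunks, int nbufs)
	{
		int i;

		vunmap(vaddr);		/* drop the contiguous alias first */

		for (i = 0; i < nbufs; ++i)
			free_page((unsigned long)chunks[i]);
	}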
@@ ... @@
 };
 
 struct mlx4_buf {
-	union {
+	struct {
 		struct mlx4_buf_list	direct;
 		struct mlx4_buf_list	*page_list;
 	} u;
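
The union becomes a struct because, on 64-bit systems, both members are now
live at once: page_list still tracks the individual DMA chunks (they are what
mlx4_buf_free() must release), while direct holds the vmap()ed alias. With
overlapping union storage one write would clobber the other; a condensed,
hypothetical view of the 64-bit allocation path makes the point:

	buf->u.page_list[i].buf = chunk;	/* kept for freeing      */
	buf->u.direct.buf = vmap(...);		/* alias for fast access */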
@@ ... @@
 void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
 static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
 {
-	if (buf->nbufs == 1)
+	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
 		return buf->u.direct.buf + offset;
 	else
 		return buf->u.page_list[offset >> PAGE_SHIFT].buf +
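
The payoff shows up in mlx4_buf_offset(): on 64-bit kernels every multi-page
buffer now has a single contiguous mapping, so callers get a plain pointer
even when the requested object straddles a page boundary. A hypothetical
caller (names assumed, not from this patch):

	/* Fetch work-queue entry n; on 64-bit this is safe even if the
	 * entry crosses a PAGE_SIZE boundary inside the buffer. */
	static void *get_wqe(struct mlx4_buf *buf, int n, int wqe_shift)
	{
		return mlx4_buf_offset(buf, n << wqe_shift);
	}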