size_t size, dma_addr_t *dma_handle,
int node)
{
+ struct device *device = mlx5_core_dma_dev(dev);
struct mlx5_priv *priv = &dev->priv;
- struct device *device = dev->device;
int original_node;
void *cpu_handle;
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
- dma_free_coherent(dev->device, buf->size, buf->frags->buf,
+ dma_free_coherent(mlx5_core_dma_dev(dev), buf->size, buf->frags->buf,
buf->frags->map);
kfree(buf->frags);
if (!frag->buf)
goto err_free_buf;
if (frag->map & ((1 << buf->page_shift) - 1)) {
- dma_free_coherent(dev->device, frag_sz,
+ dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz,
buf->frags[i].buf, buf->frags[i].map);
mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
&frag->map, buf->page_shift);
err_free_buf:
while (i--)
- dma_free_coherent(dev->device, PAGE_SIZE, buf->frags[i].buf,
+ dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE, buf->frags[i].buf,
buf->frags[i].map);
kfree(buf->frags);
err_out:
for (i = 0; i < buf->npages; i++) {
int frag_sz = min_t(int, size, PAGE_SIZE);
- dma_free_coherent(dev->device, frag_sz, buf->frags[i].buf,
+ dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz, buf->frags[i].buf,
buf->frags[i].map);
size -= frag_sz;
}
__set_bit(db->index, db->u.pgdir->bitmap);
if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
- dma_free_coherent(dev->device, PAGE_SIZE,
+ dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
bitmap_free(db->u.pgdir->bitmap);
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
- struct device *ddev = dev->device;
+ struct device *ddev = mlx5_core_dma_dev(dev);

cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
&cmd->alloc_dma, GFP_KERNEL);
if (!cmd->cmd_alloc_buf)
return -ENOMEM;
return 0;
}
dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
cmd->alloc_dma);
cmd->cmd_alloc_buf = dma_alloc_coherent(ddev,
2 * MLX5_ADAPTER_PAGE_SIZE - 1,
&cmd->alloc_dma, GFP_KERNEL);
if (!cmd->cmd_alloc_buf)
static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
- struct device *ddev = dev->device;
+ struct device *ddev = mlx5_core_dma_dev(dev);

dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
cmd->alloc_dma);
}
if (!cmd->stats)
return -ENOMEM;
- cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
+ cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
if (!cmd->pool) {
err = -ENOMEM;
goto dma_pool_err;
static int mlx5_fw_tracer_create_log_buf(struct mlx5_fw_tracer *tracer)
{
struct mlx5_core_dev *dev = tracer->dev;
- struct device *ddev = &dev->pdev->dev;
+ struct device *ddev;
dma_addr_t dma;
void *buff;
gfp_t gfp;
}
tracer->buff.log_buf = buff;
+ ddev = mlx5_core_dma_dev(dev);
dma = dma_map_single(ddev, buff, tracer->buff.size, DMA_FROM_DEVICE);
if (dma_mapping_error(ddev, dma)) {
mlx5_core_warn(dev, "FWTracer: Unable to map DMA: %d\n",
static void mlx5_fw_tracer_destroy_log_buf(struct mlx5_fw_tracer *tracer)
{
struct mlx5_core_dev *dev = tracer->dev;
- struct device *ddev = &dev->pdev->dev;
+ struct device *ddev;
if (!tracer->buff.log_buf)
return;
+ ddev = mlx5_core_dma_dev(dev);
dma_unmap_single(ddev, tracer->buff.dma, tracer->buff.size, DMA_FROM_DEVICE);
free_pages((unsigned long)tracer->buff.log_buf, get_order(tracer->buff.size));
}
struct page *page)
{
struct mlx5_rsc_dump *rsc_dump = dev->rsc_dump;
- struct device *ddev = &dev->pdev->dev;
+ struct device *ddev = mlx5_core_dma_dev(dev);
u32 out_seq_num;
u32 in_seq_num;
dma_addr_t dma;
static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv,
struct xsk_buff_pool *pool)
{
- struct device *dev = priv->mdev->device;
+ struct device *dev = mlx5_core_dma_dev(priv->mdev);
return xsk_pool_dma_map(pool, dev, 0);
}
goto err_out;
}
- pdev = sq->channel->priv->mdev->device;
+ pdev = mlx5_core_dma_dev(sq->channel->priv->mdev);
buf->dma_addr = dma_map_single(pdev, &buf->progress,
PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
priv_rx = buf->priv_rx;
resync = &priv_rx->resync;
- dev = resync->priv->mdev->device;
+ dev = mlx5_core_dma_dev(resync->priv->mdev);
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
goto out;
c->tstamp = &priv->tstamp;
c->ix = ix;
c->cpu = cpu;
- c->pdev = priv->mdev->device;
+ c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
c->num_tc = params->num_tc;
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
- param->wq.buf_numa_node = dev_to_node(mdev->device);
+ param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
mlx5e_build_rx_cq_param(priv, params, xsk, &param->cqp);
}
mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
- param->wq.buf_numa_node = dev_to_node(mdev->device);
+ param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}
void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
- param->wq.buf_numa_node = dev_to_node(priv->mdev->device);
+ param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(priv->mdev));
}
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
struct mlx5e_cq *cq,
struct mlx5e_cq_param *param)
{
- param->wq.buf_numa_node = dev_to_node(mdev->device);
- param->wq.db_numa_node = dev_to_node(mdev->device);
+ param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+ param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
return mlx5e_alloc_cq_common(mdev, param, cq);
}
if (unlikely(!buf->sg[0].data))
goto out;
- dma_device = &conn->fdev->mdev->pdev->dev;
+ dma_device = mlx5_core_dma_dev(conn->fdev->mdev);
buf->sg[0].dma_addr = dma_map_single(dma_device, buf->sg[0].data,
buf->sg[0].size, buf->dma_dir);
err = dma_mapping_error(dma_device, buf->sg[0].dma_addr);
{
struct device *dma_device;
- dma_device = &conn->fdev->mdev->pdev->dev;
+ dma_device = mlx5_core_dma_dev(conn->fdev->mdev);
if (buf->sg[1].data)
dma_unmap_single(dma_device, buf->sg[1].dma_addr,
buf->sg[1].size, buf->dma_dir);
pci_set_drvdata(dev->pdev, dev);
dev->bar_addr = pci_resource_start(pdev, 0);
- priv->numa_node = dev_to_node(&dev->pdev->dev);
+ priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
err = mlx5_pci_enable_device(dev);
if (err) {
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
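+/* DMA mappings are always done against the PCI function's struct device;
+ * resolve it through this helper so all callers agree on the DMA device.
+ */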
+static inline struct device *mlx5_core_dma_dev(struct mlx5_core_dev *dev)
+{
+ return &dev->pdev->dev;
+}
+
enum {
MLX5_CMD_DATA, /* print command payload only */
MLX5_CMD_TIME, /* print command execution time */
rb_erase(&fwp->rb_node, root);
if (in_free_list)
list_del(&fwp->list);
- dma_unmap_page(dev->device, fwp->addr & MLX5_U64_4K_PAGE_MASK,
+ dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(fwp->page);
kfree(fwp);
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
- struct device *device = dev->device;
+ struct device *device = mlx5_core_dma_dev(dev);
int nid = dev_to_node(device);
struct page *page;
u64 zero_addr = 1;
if (!mr)
return NULL;
- dma_device = &mdev->pdev->dev;
+ dma_device = mlx5_core_dma_dev(mdev);
dma_addr = dma_map_single(dma_device, buf, size,
DMA_BIDIRECTIONAL);
err = dma_mapping_error(dma_device, dma_addr);
static void dr_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5dr_mr *mr)
{
mlx5_core_destroy_mkey(mdev, &mr->mkey);
- dma_unmap_single(&mdev->pdev->dev, mr->dma_addr, mr->size,
+ dma_unmap_single(mlx5_core_dma_dev(mdev), mr->dma_addr, mr->size,
DMA_BIDIRECTIONAL);
kfree(mr);
}