#include <linux/uaccess.h>
#include <linux/genalloc.h>
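+/*
+ * cb_map_mem() - map a CB's physical pages to the device's MMU.
+ *
+ * Allocates a device virtual address block from the context's CB VA pool for
+ * every MMU page of the CB, maps each block to the corresponding physical
+ * page with hl_mmu_map() and invalidates the MMU cache. On failure, the
+ * already-mapped pages are unmapped and all VA blocks are returned to the
+ * pool.
+ */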
+static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_vm_va_block *va_block, *tmp;
+ dma_addr_t bus_addr;
+ u64 virt_addr;
+ u32 page_size = prop->pmmu.page_size;
+ s32 offset;
+ int rc;
+
+ if (!hdev->supports_cb_mapping) {
+ dev_err_ratelimited(hdev->dev,
+ "Cannot map CB because no VA range is allocated for CB mapping\n");
+ return -EINVAL;
+ }
+
+ if (!hdev->mmu_enable) {
+ dev_err_ratelimited(hdev->dev,
+ "Cannot map CB because MMU is disabled\n");
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&cb->va_block_list);
+
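+ /*
+ * Reserve a device virtual address block of one MMU page for each page
+ * of the CB. The blocks are kept in va_block_list so they can be
+ * mapped below, and later unmapped and returned to the pool.
+ */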
+ for (bus_addr = cb->bus_address;
+ bus_addr < cb->bus_address + cb->size;
+ bus_addr += page_size) {
+
+ virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
+ if (!virt_addr) {
+ dev_err(hdev->dev,
+ "Failed to allocate device virtual address for CB\n");
+ rc = -ENOMEM;
+ goto err_va_pool_free;
+ }
+
+ va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
+ if (!va_block) {
+ rc = -ENOMEM;
+ gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
+ goto err_va_pool_free;
+ }
+
+ va_block->start = virt_addr;
+ va_block->end = virt_addr + page_size;
+ va_block->size = page_size;
+ list_add_tail(&va_block->node, &cb->va_block_list);
+ }
+
+ mutex_lock(&ctx->mmu_lock);
+
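+ /*
+ * Map every VA block to its physical page. The PTEs are flushed only
+ * for the last block; 'offset' tracks how many bytes were mapped so
+ * the error path knows how much to unmap.
+ */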
+ bus_addr = cb->bus_address;
+ offset = 0;
+ list_for_each_entry(va_block, &cb->va_block_list, node) {
+ rc = hl_mmu_map(ctx, va_block->start, bus_addr, va_block->size,
+ list_is_last(&va_block->node,
+ &cb->va_block_list));
+ if (rc) {
+ dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
+ va_block->start);
+ goto err_va_umap;
+ }
+
+ bus_addr += va_block->size;
+ offset += va_block->size;
+ }
+
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);
+
+ mutex_unlock(&ctx->mmu_lock);
+
+ cb->is_mmu_mapped = true;
+
+ return 0;
+
+err_va_umap:
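+ /* unmap only what was successfully mapped, as tracked by 'offset' */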
+ list_for_each_entry(va_block, &cb->va_block_list, node) {
+ if (offset <= 0)
+ break;
+ hl_mmu_unmap(ctx, va_block->start, va_block->size,
+ offset <= va_block->size);
+ offset -= va_block->size;
+ }
+
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
+
+ mutex_unlock(&ctx->mmu_lock);
+
+err_va_pool_free:
+ list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
+ gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
+ list_del(&va_block->node);
+ kfree(va_block);
+ }
+
+ return rc;
+}
+
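+/*
+ * cb_unmap_mem() - tear down a mapping created by cb_map_mem(): unmap every
+ * VA block from the device's MMU, invalidate the MMU cache and return the
+ * blocks to the context's CB VA pool.
+ */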
+static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_vm_va_block *va_block, *tmp;
+
+ mutex_lock(&ctx->mmu_lock);
+
+ list_for_each_entry(va_block, &cb->va_block_list, node)
+ if (hl_mmu_unmap(ctx, va_block->start, va_block->size,
+ list_is_last(&va_block->node,
+ &cb->va_block_list)))
+ dev_warn_ratelimited(hdev->dev,
+ "Failed to unmap CB's va 0x%llx\n",
+ va_block->start);
+
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
+
+ mutex_unlock(&ctx->mmu_lock);
+
+ list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
+ gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
+ list_del(&va_block->node);
+ kfree(va_block);
+ }
+}
+
static void cb_release(struct kref *ref)
{
hl_debugfs_remove_cb(cb);
+ if (cb->is_mmu_mapped)
+ cb_unmap_mem(cb->ctx, cb);
+
hl_ctx_put(cb->ctx);
cb_do_release(hdev, cb);
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
- u64 *handle)
+ bool map_cb, u64 *handle)
{
struct hl_cb *cb;
bool alloc_new_cb = true;
cb->ctx = ctx;
hl_ctx_get(hdev, cb->ctx);
+ if (map_cb) {
+ if (ctx_id == HL_KERNEL_ASID_ID) {
+ dev_err(hdev->dev,
+ "CB mapping is not supported for kernel context\n");
+ rc = -EINVAL;
+ goto release_cb;
+ }
+
+ rc = cb_map_mem(ctx, cb);
+ if (rc)
+ goto release_cb;
+ }
+
spin_lock(&mgr->cb_lock);
rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
spin_unlock(&mgr->cb_lock);
if (rc < 0) {
dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
- goto release_cb;
+ goto unmap_mem;
}
cb->id = (u64) rc;
return 0;
+unmap_mem:
+ if (cb->is_mmu_mapped)
+ cb_unmap_mem(cb->ctx, cb);
release_cb:
hl_ctx_put(cb->ctx);
cb_do_release(hdev, cb);
rc = -EINVAL;
} else {
rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
- args->in.cb_size, false, &handle);
+ args->in.cb_size, false,
+ !!(args->in.flags & HL_CB_FLAGS_MAP),
+ &handle);
}
memset(args, 0, sizeof(*args));
int rc;
rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size,
- internal_cb, &cb_handle);
+ internal_cb, false, &cb_handle);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate CB for the kernel driver %d\n", rc);
return 0;
}
+
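+/*
+ * hl_cb_va_pool_init() - create the per-context genalloc pool of device
+ * virtual addresses used for CB mapping. The pool covers the range from
+ * cb_va_start_addr to cb_va_end_addr with the MMU page size as allocation
+ * granularity. It is a no-op on ASICs that do not support CB mapping.
+ */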
+int hl_cb_va_pool_init(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc;
+
+ if (!hdev->supports_cb_mapping)
+ return 0;
+
+ ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
+ if (!ctx->cb_va_pool) {
+ dev_err(hdev->dev,
+ "Failed to create VA gen pool for CB mapping\n");
+ return -ENOMEM;
+ }
+
+ rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
+ prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to add memory to VA gen pool for CB mapping\n");
+ goto err_pool_destroy;
+ }
+
+ return 0;
+
+err_pool_destroy:
+ gen_pool_destroy(ctx->cb_va_pool);
+
+ return rc;
+}
+
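+/*
+ * hl_cb_va_pool_fini() - destroy the per-context CB VA pool created by
+ * hl_cb_va_pool_init().
+ */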
+void hl_cb_va_pool_fini(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+
+ if (!hdev->supports_cb_mapping)
+ return;
+
+ gen_pool_destroy(ctx->cb_va_pool);
+}
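+
+/*
+ * Illustrative sketch (assumption, not part of this hunk): the context
+ * init/teardown code is expected to pair these helpers, e.g.:
+ *
+ *	rc = hl_cb_va_pool_init(ctx);
+ *	if (rc)
+ *		return rc;
+ *	...
+ *	hl_cb_va_pool_fini(ctx);
+ */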
* @pcie_aux_dbi_reg_addr: Address of the PCIE_AUX DBI register.
* @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
* @mmu_dram_default_page_addr: DRAM default page physical address.
+ * @cb_va_start_addr: start of the device virtual address range reserved for
+ * mapping command buffers to the device's MMU.
+ * @cb_va_end_addr: end of the device virtual address range reserved for
+ * mapping command buffers to the device's MMU.
* @mmu_pgt_size: MMU page tables total size.
* @mmu_pte_size: PTE size in MMU page tables.
* @mmu_hop_table_size: MMU hop table size.
u64 pcie_aux_dbi_reg_addr;
u64 mmu_pgt_addr;
u64 mmu_dram_default_page_addr;
+ u64 cb_va_start_addr;
+ u64 cb_va_end_addr;
u32 mmu_pgt_size;
u32 mmu_pte_size;
u32 mmu_hop_table_size;
* @lock: spinlock to protect mmap/cs flows.
* @debugfs_list: node in debugfs list of command buffers.
* @pool_list: node in pool list of command buffers.
+ * @va_block_list: list of virtual address blocks of the CB if it is mapped to
+ * the device's MMU.
* @id: the CB's ID.
* @kernel_address: Holds the CB's kernel virtual address.
* @bus_address: Holds the CB's DMA address.
* @mmap: true if the CB is currently mmaped to user.
* @is_pool: true if CB was acquired from the pool, false otherwise.
* @is_internal: internally allocated
+ * @is_mmu_mapped: true if the CB is mapped to the device's MMU.
*/
struct hl_cb {
struct kref refcount;
spinlock_t lock;
struct list_head debugfs_list;
struct list_head pool_list;
+ struct list_head va_block_list;
u64 id;
u64 kernel_address;
dma_addr_t bus_address;
u8 mmap;
u8 is_pool;
u8 is_internal;
+ u8 is_mmu_mapped;
};
* @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
* MMU hash or walking the PGT requires taking this lock.
* @debugfs_list: node in debugfs list of contexts.
+ * @cb_va_pool: device VA pool for command buffers which are mapped to the
+ * device's MMU.
* @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
* to user so user could inquire about CS. It is used as
* index to cs_pending array.
struct mutex mmu_lock;
struct list_head debugfs_list;
struct hl_cs_counters cs_counters;
+ struct gen_pool *cb_va_pool;
u64 cs_sequence;
u64 *dram_default_hops;
spinlock_t cs_lock;
* @sync_stream_queue_idx: helper index for sync stream queues initialization.
* @supports_coresight: is CoreSight supported.
* @supports_soft_reset: is soft reset supported.
+ * @supports_cb_mapping: is mapping a CB to the device's MMU supported.
*/
struct hl_device {
struct pci_dev *pdev;
u8 sync_stream_queue_idx;
u8 supports_coresight;
u8 supports_soft_reset;
+ u8 supports_cb_mapping;
/* Parameters for bring-up */
u8 mmu_enable;
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
- u64 *handle);
+ bool map_cb, u64 *handle);
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
bool internal_cb);
int hl_cb_pool_init(struct hl_device *hdev);
int hl_cb_pool_fini(struct hl_device *hdev);
+int hl_cb_va_pool_init(struct hl_ctx *ctx);
+void hl_cb_va_pool_fini(struct hl_ctx *ctx);
void hl_cs_rollback_all(struct hl_device *hdev);
struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,