int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle)
{
+        struct hl_cb *cb;
        int rc;

+        /* Make sure that a CB handle isn't destroyed by the user more than once */
+        if (!mmg->is_kernel_mem_mgr) {
+                cb = hl_cb_get(mmg, cb_handle);
+                if (!cb) {
+                        dev_dbg(mmg->dev, "CB destroy failed, no CB was found for handle %#llx\n",
+                                cb_handle);
+                        rc = -EINVAL;
+                        goto out;
+                }
+
+                /* atomic_cmpxchg() returns the old flag value: non-zero means
+                 * another destroy call already claimed this handle.
+                 */
+                rc = atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1);
+                hl_cb_put(cb);
+                if (rc) {
+                        dev_dbg(mmg->dev, "CB destroy failed, handle %#llx was already destroyed\n",
+                                cb_handle);
+                        rc = -EINVAL;
+                        goto out;
+                }
+        }
+
        rc = hl_mmap_mem_buf_put_handle(mmg, cb_handle);
+out:
        if (rc < 0)
                return rc; /* Invalid handle */
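
Why the guard above is race-free: atomic_cmpxchg() writes 1 to is_handle_destroyed only if the flag still holds 0, and returns the previous value, so exactly one caller observes 0 and proceeds to release the handle; any repeated or racing destroy sees 1 and gets -EINVAL. A minimal standalone sketch of the same once-only pattern in C11 atomics (illustration only, not driver code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int is_handle_destroyed;

/* Mirrors the kernel's atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1):
 * only the first caller wins the 0 -> 1 transition.
 */
static int destroy_once(void)
{
        int expected = 0;

        if (!atomic_compare_exchange_strong(&is_handle_destroyed, &expected, 1))
                return -1; /* the kernel path returns -EINVAL here */
        return 0;
}

int main(void)
{
        printf("first destroy:  %d\n", destroy_once());  /* prints 0 */
        printf("second destroy: %d\n", destroy_once());  /* prints -1 */
        return 0;
}
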
        if (rc)
                goto free_chip_info;

-        hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);
+        hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr, 1);

        hdev->reset_wq = create_singlethread_workqueue("hl_device_reset");
        if (!hdev->reset_wq) {
 * @dev: back pointer to the owning device
 * @lock: protects handles
 * @handles: an idr holding all active handles to the memory buffers in the system.
+ * @is_kernel_mem_mgr: indicates whether this is the per-device kernel memory manager
 */
struct hl_mem_mgr {
        struct device *dev;
        spinlock_t lock;
        struct idr handles;
+        u8 is_kernel_mem_mgr;
};
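
For context, @handles is a plain Linux IDR, so handle allocation and lookup presumably follow the usual pattern under @lock. A hedged sketch of that lifecycle (idr_alloc()/idr_find() are the real IDR API; the helper names and the start/end bounds are illustrative, not taken from this patch):

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>

/* Illustrative only: how a manager like hl_mem_mgr would typically
 * hand out and resolve integer handles for its buffers.
 */
static int example_alloc_handle(struct hl_mem_mgr *mmg, void *buf)
{
        int id;

        spin_lock(&mmg->lock);
        /* GFP_ATOMIC because the allocation happens under a spinlock */
        id = idr_alloc(&mmg->handles, buf, 1, 0, GFP_ATOMIC);
        spin_unlock(&mmg->lock);

        return id; /* handle >= 1 on success, negative errno on failure */
}

static void *example_lookup_handle(struct hl_mem_mgr *mmg, int id)
{
        void *buf;

        spin_lock(&mmg->lock);
        buf = idr_find(&mmg->handles, id);
        spin_unlock(&mmg->lock);

        return buf;
}
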
/**
 * @size: holds the CB's size.
 * @roundup_size: holds the CB's size after roundup to page size.
 * @cs_cnt: holds number of CS that this CB participates in.
+ * @is_handle_destroyed: atomic boolean indicating whether the CB handle was destroyed.
 * @is_pool: true if CB was acquired from the pool, false otherwise.
 * @is_internal: true if the CB was allocated internally by the driver.
 * @is_mmu_mapped: true if the CB is mapped to the device's MMU.
        u32 size;
        u32 roundup_size;
        atomic_t cs_cnt;
+        atomic_t is_handle_destroyed;
        u8 is_pool;
        u8 is_internal;
        u8 is_mmu_mapped;
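
One subtlety worth noting in the hl_cb_destroy() hunk: the reference taken by hl_cb_get() only needs to pin the CB while is_handle_destroyed is read-modify-written; the old flag value is already captured in rc, so hl_cb_put() can drop the reference before rc is inspected. The same pin-test-release shape, sketched with a kref (the object and helper names are hypothetical; kref_get()/kref_put() and atomic_cmpxchg() are the real kernel API):

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical object standing in for struct hl_cb */
struct example_obj {
        struct kref refcount;
        atomic_t is_handle_destroyed;
};

static void example_release(struct kref *kref)
{
        kfree(container_of(kref, struct example_obj, refcount));
}

static int example_destroy_once(struct example_obj *obj)
{
        int old;

        kref_get(&obj->refcount);                  /* pin, like hl_cb_get() */
        old = atomic_cmpxchg(&obj->is_handle_destroyed, 0, 1);
        kref_put(&obj->refcount, example_release); /* unpin, like hl_cb_put() */

        return old ? -EINVAL : 0; /* old value was captured before the put */
}
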
char *hl_format_as_binary(char *buf, size_t buf_len, u32 n);
const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type);
-void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg);
+void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg, u8 is_kernel_mem_mgr);
void hl_mem_mgr_fini(struct hl_mem_mgr *mmg);
int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
                    void *args);
        nonseekable_open(inode, filp);

        hl_ctx_mgr_init(&hpriv->ctx_mgr);
-        hl_mem_mgr_init(hpriv->hdev->dev, &hpriv->mem_mgr);
+        hl_mem_mgr_init(hpriv->hdev->dev, &hpriv->mem_mgr, 0);

        hpriv->taskpid = get_task_pid(current, PIDTYPE_PID);
 *
 * @dev: owner device pointer
 * @mmg: structure to initialize
+ * @is_kernel_mem_mgr: indicates whether this is the per-device kernel memory manager
 *
 * Initialize an instance of the unified memory manager.
 */
-void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg)
+void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg, u8 is_kernel_mem_mgr)
{
        mmg->dev = dev;
        spin_lock_init(&mmg->lock);
        idr_init(&mmg->handles);
+        mmg->is_kernel_mem_mgr = is_kernel_mem_mgr;
}
/**