bool register_ts_record, u64 ts_handle, u64 ts_offset,
u32 *status, u64 *timestamp)
{
- u32 cq_patched_handle, ts_patched_handle;
+ u32 cq_patched_handle;
struct hl_user_pending_interrupt *pend;
struct hl_mmap_mem_buf *buf;
struct hl_cb *cq_cb;
if (register_ts_record) {
dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, ts offset: %llu, cq_offset: %llu\n",
interrupt->interrupt_id, ts_offset, cq_counters_offset);
-
- /* TODO:
- * See if this can be removed.
- * Embedding type in handle will no longer be needed as soon as we
- * switch to using a single memory manager for all memory types.
- * We may still need the page shift, though.
- */
- ts_patched_handle = lower_32_bits(ts_handle >> PAGE_SHIFT);
- buf = hl_mmap_mem_buf_get(mmg, ts_patched_handle);
+ buf = hl_mmap_mem_buf_get(mmg, ts_handle);
if (!buf) {
rc = -EINVAL;
goto put_cq_cb;
}
vm_pgoff = vma->vm_pgoff;
- vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
case HL_MMAP_TYPE_CB:
+ vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
return hl_cb_mmap(hpriv, vma);
case HL_MMAP_TYPE_BLOCK:
+ vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
return hl_hw_block_mmap(hpriv, vma);
case HL_MMAP_TYPE_TS_BUFF:
};
/**
- * struct hl_mmap_mem_buf_ops - describes unified memory manager buffer behavior
+ * struct hl_mmap_mem_buf_behavior - describes unified memory manager buffer behavior
+ * @mem_id: memory type identifier, embedded in the handle and used to identify
+ * the memory type by handle.
* @alloc: callback executed on buffer allocation, shall allocate the memory,
* set it under buffer private, and set mappable size.
* @mmap: callback executed on mmap, must map the buffer to vma
* @release: callback executed on release, must free the resources used by the buffer
*/
-struct hl_mmap_mem_buf_ops {
+struct hl_mmap_mem_buf_behavior {
+ u64 mem_id;
+
int (*alloc)(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args);
int (*mmap)(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, void *args);
void (*release)(struct hl_mmap_mem_buf *buf);
};
/**
- * struct hl_mmap_mem_buf_ops - describes a single unified memory buffer
- * @ops: buffer behavior
+ * struct hl_mmap_mem_buf - describes a single unified memory buffer
+ * @behavior: buffer behavior
* @mmg: back pointer to the unified memory manager
* @refcount: reference counter for buffer users
* @private: pointer to buffer behavior private data
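+ * @mmap: atomic boolean indicating whether the buffer is currently mapped
+ * @real_mapped_size: the size actually mapped at the moment, may change at runtime
+ * @mappable_size: the original mappable size of the buffer, set at allocation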
* @handle: the buffer id in mmg handles store
*/
struct hl_mmap_mem_buf {
- struct hl_mmap_mem_buf_ops *ops;
+ struct hl_mmap_mem_buf_behavior *behavior;
struct hl_mem_mgr *mmg;
struct kref refcount;
void *private;
atomic_t mmap;
u64 real_mapped_size;
u64 mappable_size;
- u32 handle;
+ u64 handle;
};
/**
int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
void *args);
struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg,
- u32 handle);
+ u64 handle);
int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf);
struct hl_mmap_mem_buf *
hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
- struct hl_mmap_mem_buf_ops *behavior, gfp_t gfp,
+ struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
void *args);
#ifdef CONFIG_DEBUG_FS
return -ENOMEM;
}
-static struct hl_mmap_mem_buf_ops hl_ts_behavior = {
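+/* Behavior descriptor for timestamp buffers: mem_id marks their handles as HL_MMAP_TYPE_TS_BUFF */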
+static struct hl_mmap_mem_buf_behavior hl_ts_behavior = {
+ .mem_id = HL_MMAP_TYPE_TS_BUFF,
.mmap = hl_ts_mmap,
.alloc = hl_ts_alloc_buf,
.release = ts_buff_release,
if (!buf)
return -ENOMEM;
- /* TODO:
- * Remove HL_MMAP_TYPE_TS_BUFF.
- * Embedding type in handle will no longer be needed as soon as we
- * switch to using a single memory manager for all memory types.
- */
- *handle = ((u64)buf->handle | HL_MMAP_TYPE_TS_BUFF) << PAGE_SHIFT;
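+ /* The unified handle already embeds the memory type and page shift, so it is returned as-is */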
+ *handle = buf->handle;
return 0;
}
* @return Find the buffer in the store and return a pointer to its descriptor.
* Increase buffer refcount. If not found - return NULL.
*/
-struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg, u32 handle)
+struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg, u64 handle)
{
struct hl_mmap_mem_buf *buf;
spin_lock(&mmg->lock);
- buf = idr_find(&mmg->handles, handle);
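+ /* The idr id sits in the low 32 bits of the shifted handle; the type bits are masked off */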
+ buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
if (!buf) {
spin_unlock(&mmg->lock);
dev_warn(mmg->dev,
- "Buff get failed, no match to handle %u\n", handle);
+ "Buff get failed, no match to handle %llu\n", handle);
return NULL;
}
kref_get(&buf->refcount);
- idr_remove(&buf->mmg->handles, buf->handle);
+ idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
spin_unlock(&buf->mmg->lock);
- if (buf->ops->release)
- buf->ops->release(buf);
+ if (buf->behavior->release)
+ buf->behavior->release(buf);
kfree(buf);
}
*/
struct hl_mmap_mem_buf *
hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
- struct hl_mmap_mem_buf_ops *behavior, gfp_t gfp,
+ struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
void *args)
{
struct hl_mmap_mem_buf *buf;
goto free_buf;
}
- buf->handle = rc;
buf->mmg = mmg;
- buf->ops = behavior;
+ buf->behavior = behavior;
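+ /* Embed the memory type in the handle and pre-shift by PAGE_SHIFT so it can be used directly as an mmap offset */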
+ buf->handle = (((u64)rc | buf->behavior->mem_id) << PAGE_SHIFT);
kref_init(&buf->refcount);
- rc = buf->ops->alloc(buf, gfp, args);
+ rc = buf->behavior->alloc(buf, gfp, args);
if (rc) {
dev_err(mmg->dev, "Failure in buffer alloc callback %d\n",
rc);
goto remove_idr;
}
- dev_dbg(mmg->dev, "Created buff object handle %u\n", buf->handle);
+ dev_dbg(mmg->dev, "Created buff object handle %llu\n", buf->handle);
return buf;
remove_idr:
{
struct hl_mmap_mem_buf *buf;
u64 user_mem_size;
- u32 handle;
+ u64 handle;
int rc;
/* We use the page offset to hold the idr and thus we need to clear
* it before doing the mmap itself
*/
- handle = vma->vm_pgoff;
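+ /* vm_pgoff holds the handle shifted right by PAGE_SHIFT; shift back to reconstruct it */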
+ handle = vma->vm_pgoff << PAGE_SHIFT;
vma->vm_pgoff = 0;
/* Reference was taken here */
buf = hl_mmap_mem_buf_get(mmg, handle);
if (!buf) {
dev_err(mmg->dev,
- "Memory mmap failed, no match to handle %u\n", handle);
+ "Memory mmap failed, no match to handle %llu\n", handle);
return -EINVAL;
}
vma->vm_private_data = buf;
- rc = buf->ops->mmap(buf, vma, args);
+ rc = buf->behavior->mmap(buf, vma, args);
if (rc) {
atomic_set(&buf->mmap, 0);
goto put_mem;