habanalabs: fix H/W block handling for partial unmappings
authorTomer Tayar <ttayar@habana.ai>
Wed, 3 Aug 2022 13:36:02 +0000 (16:36 +0300)
committerOded Gabbay <ogabbay@kernel.org>
Sun, 18 Sep 2022 10:29:51 +0000 (13:29 +0300)
Several munmap() calls can be done for a mapped H/W block that has a
size larger than a page size.
Releasing the object should be done only when the entire mapped range
is unmapped.

Signed-off-by: Tomer Tayar <ttayar@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
drivers/misc/habanalabs/common/debugfs.c
drivers/misc/habanalabs/common/habanalabs.h
drivers/misc/habanalabs/common/memory.c

index 69fd3ed..48d3ec8 100644 (file)
@@ -291,14 +291,16 @@ static int vm_show(struct seq_file *s, void *data)
                if (ctx->asid != HL_KERNEL_ASID_ID &&
                    !list_empty(&ctx->hw_block_mem_list)) {
                        seq_puts(s, "\nhw_block mappings:\n\n");
-                       seq_puts(s, "    virtual address    size    HW block id\n");
-                       seq_puts(s, "-------------------------------------------\n");
+                       seq_puts(s,
+                               "    virtual address    block size    mapped size    HW block id\n");
+                       seq_puts(s,
+                               "---------------------------------------------------------------\n");
                        mutex_lock(&ctx->hw_block_list_lock);
-                       list_for_each_entry(lnode, &ctx->hw_block_mem_list,
-                                           node) {
+                       list_for_each_entry(lnode, &ctx->hw_block_mem_list, node) {
                                seq_printf(s,
-                                       "    0x%-14lx   %-6u      %-9u\n",
-                                       lnode->vaddr, lnode->size, lnode->id);
+                                       "    0x%-14lx   %-6u        %-6u             %-9u\n",
+                                       lnode->vaddr, lnode->block_size, lnode->mapped_size,
+                                       lnode->id);
                        }
                        mutex_unlock(&ctx->hw_block_list_lock);
                }
index f495a4b..237a887 100644 (file)
@@ -2063,14 +2063,16 @@ struct hl_vm_hash_node {
  * @node: node to hang on the list in context object.
  * @ctx: the context this node belongs to.
  * @vaddr: virtual address of the HW block.
- * @size: size of the block.
+ * @block_size: size of the block.
+ * @mapped_size: size of the block which is mapped. May change if partial un-mappings are done.
  * @id: HW block id (handle).
  */
 struct hl_vm_hw_block_list_node {
        struct list_head        node;
        struct hl_ctx           *ctx;
        unsigned long           vaddr;
-       u32                     size;
+       u32                     block_size;
+       u32                     mapped_size;
        u32                     id;
 };
 
index a027fa8..5bc704d 100644 (file)
@@ -1442,6 +1442,13 @@ static void hw_block_vm_close(struct vm_area_struct *vma)
        struct hl_vm_hw_block_list_node *lnode =
                (struct hl_vm_hw_block_list_node *) vma->vm_private_data;
        struct hl_ctx *ctx = lnode->ctx;
+       long new_mmap_size;
+
+       new_mmap_size = lnode->mapped_size - (vma->vm_end - vma->vm_start);
+       if (new_mmap_size > 0) {
+               lnode->mapped_size = new_mmap_size;
+               return;
+       }
 
        mutex_lock(&ctx->hw_block_list_lock);
        list_del(&lnode->node);
@@ -1502,7 +1509,8 @@ int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
 
        lnode->ctx = ctx;
        lnode->vaddr = vma->vm_start;
-       lnode->size = block_size;
+       lnode->block_size = block_size;
+       lnode->mapped_size = lnode->block_size;
        lnode->id = block_id;
 
        vma->vm_private_data = lnode;