HL_DEVICE_HW_STATE_DIRTY
};
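+/*
+ * Passed as @alignment to hl_reserve_va_block() by callers that have no
+ * specific alignment requirement; the reserved block then falls back to
+ * the VA range's page-size alignment.
+ */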
+#define HL_MMU_VA_ALIGNMENT_NOT_NEEDED 0
+
/**
* struct hl_mmu_properties - ASIC specific MMU address translation properties.
* @start_addr: virtual start address of the memory region.
void hl_vm_fini(struct hl_device *hdev);
u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
- enum hl_va_range_type type, u32 size);
+ enum hl_va_range_type type, u32 size, u32 alignment);
int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
u64 start_addr, u64 size);
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
* @ctx: current context
 * @type: virtual address range type.
* @size: requested block size.
+ * @alignment: required alignment in bytes of the virtual block start address;
+ *             0 (HL_MMU_VA_ALIGNMENT_NOT_NEEDED) means no alignment requirement
+ *             beyond the range's default page-size alignment.
*
* This function does the following:
 * - Iterate over the virtual block list to find a suitable virtual block for the
- *   given size.
+ *   given size and alignment.
* - Reserve the requested block and update the list.
* - Return the start address of the virtual block.
*/
u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
- enum hl_va_range_type type, u32 size)
+ enum hl_va_range_type type, u32 size, u32 alignment)
{
return get_va_block(hdev, ctx->va_range[type], size, 0,
- ctx->va_range[type]->page_size);
+ max(alignment, ctx->va_range[type]->page_size));
}
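A minimal caller sketch (hypothetical, not part of this patch; assumes hdev and
ctx are in scope and SZ_2M comes from <linux/sizes.h>):

	/*
	 * Hypothetical usage: reserve a 2MB host VA block whose start
	 * address is 2MB-aligned. Because of the max() above, an alignment
	 * smaller than the range's page size (including
	 * HL_MMU_VA_ALIGNMENT_NOT_NEEDED) still yields a page-aligned block.
	 */
	u64 va = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST,
					SZ_2M, SZ_2M);
	if (!va)
		return -ENOMEM;

	/* ... map and use the block ... */

	hl_unreserve_va_block(hdev, ctx, va, SZ_2M);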
/**
}
hdev->internal_cb_va_base = hl_reserve_va_block(hdev, ctx,
- HL_VA_RANGE_TYPE_HOST, HOST_SPACE_INTERNAL_CB_SZ);
+ HL_VA_RANGE_TYPE_HOST, HOST_SPACE_INTERNAL_CB_SZ,
+ HL_MMU_VA_ALIGNMENT_NOT_NEEDED);
if (!hdev->internal_cb_va_base)
goto destroy_internal_cb_pool;
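Existing callers such as this internal CB pool reservation pass
HL_MMU_VA_ALIGNMENT_NOT_NEEDED, so max(0, page_size) resolves to the same
page-size alignment the old code passed and their behavior is unchanged.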