},
};
+ struct intel_region_reserve {
+ struct list_head link;
- void *node;
++ struct ttm_resource *res;
+ };
+
struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
u16 class, u16 instance)
return NULL;
}
- static u64
- intel_memory_region_free_pages(struct intel_memory_region *mem,
- struct list_head *blocks)
+ /**
+ * intel_memory_region_unreserve - Unreserve all previously reserved
+ * ranges
+ * @mem: The region containing the reserved ranges.
+ */
+ void intel_memory_region_unreserve(struct intel_memory_region *mem)
{
- struct i915_buddy_block *block, *on;
- u64 size = 0;
+ struct intel_region_reserve *reserve, *next;
- list_for_each_entry_safe(block, on, blocks, link) {
- size += i915_buddy_block_size(&mem->mm, block);
- i915_buddy_free(&mem->mm, block);
- }
- INIT_LIST_HEAD(blocks);
+ if (!mem->priv_ops || !mem->priv_ops->free)
+ return;
- return size;
- }
-
- void
- __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
- struct list_head *blocks)
- {
mutex_lock(&mem->mm_lock);
- mem->avail += intel_memory_region_free_pages(mem, blocks);
- mutex_unlock(&mem->mm_lock);
- }
-
- void
- __intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
- {
- struct list_head blocks;
-
- INIT_LIST_HEAD(&blocks);
- list_add(&block->link, &blocks);
- __intel_memory_region_put_pages_buddy(block->private, &blocks);
- }
-
- int
- __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags,
- struct list_head *blocks)
- {
- unsigned int min_order = 0;
- unsigned long n_pages;
-
- GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
- GEM_BUG_ON(!list_empty(blocks));
-
- if (flags & I915_ALLOC_MIN_PAGE_SIZE) {
- min_order = ilog2(mem->min_page_size) -
- ilog2(mem->mm.chunk_size);
- }
-
- if (flags & I915_ALLOC_CONTIGUOUS) {
- size = roundup_pow_of_two(size);
- min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
+ list_for_each_entry_safe(reserve, next, &mem->reserved, link) {
+ list_del(&reserve->link);
- mem->priv_ops->free(mem, reserve->node);
++ mem->priv_ops->free(mem, reserve->res);
+ kfree(reserve);
}
-
- if (size > mem->mm.size)
- return -E2BIG;
-
- n_pages = size >> ilog2(mem->mm.chunk_size);
-
- mutex_lock(&mem->mm_lock);
-
- do {
- struct i915_buddy_block *block;
- unsigned int order;
-
- order = fls(n_pages) - 1;
- GEM_BUG_ON(order > mem->mm.max_order);
- GEM_BUG_ON(order < min_order);
-
- do {
- block = i915_buddy_alloc(&mem->mm, order);
- if (!IS_ERR(block))
- break;
-
- if (order-- == min_order)
- goto err_free_blocks;
- } while (1);
-
- n_pages -= BIT(order);
-
- block->private = mem;
- list_add_tail(&block->link, blocks);
-
- if (!n_pages)
- break;
- } while (1);
-
- mem->avail -= size;
mutex_unlock(&mem->mm_lock);
- return 0;
-
- err_free_blocks:
- intel_memory_region_free_pages(mem, blocks);
- mutex_unlock(&mem->mm_lock);
- return -ENXIO;
}
- struct i915_buddy_block *
- __intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
+ /**
+ * intel_memory_region_reserve - Reserve a memory range
+ * @mem: The region for which we want to reserve a range.
+ * @offset: Start of the range to reserve.
+ * @size: The size of the range to reserve.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+ int intel_memory_region_reserve(struct intel_memory_region *mem,
+ resource_size_t offset,
+ resource_size_t size)
{
- struct i915_buddy_block *block;
- LIST_HEAD(blocks);
+ struct intel_region_reserve *reserve;
int ret;
- ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
- if (ret)
- return ERR_PTR(ret);
+ if (!mem->priv_ops || !mem->priv_ops->reserve)
+ return -EINVAL;
- block = list_first_entry(&blocks, typeof(*block), link);
- list_del_init(&block->link);
- return block;
- }
+ reserve = kzalloc(sizeof(*reserve), GFP_KERNEL);
+ if (!reserve)
+ return -ENOMEM;
- int intel_memory_region_init_buddy(struct intel_memory_region *mem)
- {
- return i915_buddy_init(&mem->mm, resource_size(&mem->region),
- PAGE_SIZE);
- }
-
- void intel_memory_region_release_buddy(struct intel_memory_region *mem)
- {
- i915_buddy_free_list(&mem->mm, &mem->reserved);
- i915_buddy_fini(&mem->mm);
- }
-
- int intel_memory_region_reserve(struct intel_memory_region *mem,
- u64 offset, u64 size)
- {
- int ret;
- reserve->node = mem->priv_ops->reserve(mem, offset, size);
- if (IS_ERR(reserve->node)) {
- ret = PTR_ERR(reserve->node);
++ reserve->res = mem->priv_ops->reserve(mem, offset, size);
++ if (IS_ERR(reserve->res)) {
++ ret = PTR_ERR(reserve->res);
+ kfree(reserve);
+ return ret;
+ }
mutex_lock(&mem->mm_lock);
- ret = i915_buddy_alloc_range(&mem->mm, &mem->reserved, offset, size);
+ list_add_tail(&reserve->link, &mem->reserved);
mutex_unlock(&mem->mm_lock);
- return ret;
+ return 0;
}
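
For context, a usage sketch (not part of this patch; the caller, offset and size below are made up) of the reserve/unreserve pair introduced above:

/* Hypothetical caller: keep the first 1 MiB of a region out of the allocator. */
static int example_reserve_low_range(struct intel_memory_region *mem)
{
	int err = intel_memory_region_reserve(mem, 0, SZ_1M);

	if (err)
		return err;

	/* Later, all reserved ranges are returned in one go: */
	/* intel_memory_region_unreserve(mem); */
	return 0;
}
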
struct intel_memory_region *
--- /dev/null
+ // SPDX-License-Identifier: MIT
+ /*
+ * Copyright © 2021 Intel Corporation
+ */
+ #include <drm/ttm/ttm_bo_driver.h>
+ #include <drm/ttm/ttm_device.h>
++#include <drm/ttm/ttm_range_manager.h>
+
+ #include "i915_drv.h"
+ #include "i915_scatterlist.h"
+
+ #include "intel_region_ttm.h"
+
+ /**
+ * DOC: TTM support structure
+ *
+ * The code in this file deals with setting up memory managers for TTM
+ * LMEM and MOCK regions and converting the output from
+ * the managers to struct sg_table, basically providing the mapping from
+ * i915 GEM regions to TTM memory types and resource managers.
+ */
+
+ /* A zero-initialized driver for now. We don't have a TTM backend yet. */
+ static struct ttm_device_funcs i915_ttm_bo_driver;
+
+ /**
+ * intel_region_ttm_device_init - Initialize a TTM device
+ * @dev_priv: Pointer to an i915 device private structure.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+ int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
+ {
+ struct drm_device *drm = &dev_priv->drm;
+
+ return ttm_device_init(&dev_priv->bdev, &i915_ttm_bo_driver,
+ drm->dev, drm->anon_inode->i_mapping,
+ drm->vma_offset_manager, false, false);
+ }
+
+ /**
+ * intel_region_ttm_device_fini - Finalize a TTM device
+ * @dev_priv: Pointer to an i915 device private structure.
+ */
+ void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
+ {
+ ttm_device_fini(&dev_priv->bdev);
+ }
+
+ /*
+ * Map the i915 memory regions to TTM memory types. We use the
+ * driver-private types for now, reserving TTM_PL_VRAM for stolen
+ * memory and TTM_PL_TT for GGTT, should we decide to implement this.
+ */
+ static int intel_region_to_ttm_type(struct intel_memory_region *mem)
+ {
+ int type;
+
+ GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
+ mem->type != INTEL_MEMORY_MOCK);
+
+ type = mem->instance + TTM_PL_PRIV;
+ GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);
+
+ return type;
+ }
+
-static void *intel_region_ttm_node_reserve(struct intel_memory_region *mem,
- resource_size_t offset,
- resource_size_t size)
++static struct ttm_resource *
++intel_region_ttm_node_reserve(struct intel_memory_region *mem,
++ resource_size_t offset,
++ resource_size_t size)
+ {
+ struct ttm_resource_manager *man = mem->region_private;
+ struct ttm_place place = {};
- struct ttm_resource res = {};
+ struct ttm_buffer_object mock_bo = {};
++ struct ttm_resource *res;
+ int ret;
+
+ /*
+ * Having to use a mock_bo is unfortunate but stems from some
+ * drivers having private managers that insist on knowing what the
+ * allocated memory is intended for, using it to send private
+ * data to the manager. Also, recently the bo has been used to send
+ * alignment info to the manager. Assume that apart from the latter,
+ * none of the managers we use will ever access the buffer object
+ * members, hoping we can pass the alignment info in the
+ * struct ttm_place in the future.
+ */
+
+ place.fpfn = offset >> PAGE_SHIFT;
+ place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
- res.num_pages = size >> PAGE_SHIFT;
++ mock_bo.base.size = size;
+ ret = man->func->alloc(man, &mock_bo, &place, &res);
+ if (ret == -ENOSPC)
+ ret = -ENXIO;
+
- return ret ? ERR_PTR(ret) : res.mm_node;
++ return ret ? ERR_PTR(ret) : res;
+ }
+
+ /**
+ * intel_region_ttm_node_free - Free a node allocated from a resource manager
+ * @mem: The region the node was allocated from.
+ * @node: The opaque node representing an allocation.
+ */
+ void intel_region_ttm_node_free(struct intel_memory_region *mem,
- void *node)
++ struct ttm_resource *res)
+ {
+ struct ttm_resource_manager *man = mem->region_private;
- struct ttm_resource res = {};
+
- res.mm_node = node;
- man->func->free(man, &res);
++ man->func->free(man, res);
+ }
+
+ static const struct intel_memory_region_private_ops priv_ops = {
+ .reserve = intel_region_ttm_node_reserve,
+ .free = intel_region_ttm_node_free,
+ };
+
+ int intel_region_ttm_init(struct intel_memory_region *mem)
+ {
+ struct ttm_device *bdev = &mem->i915->bdev;
+ int mem_type = intel_region_to_ttm_type(mem);
+ int ret;
+
+ ret = ttm_range_man_init(bdev, mem_type, false,
+ resource_size(&mem->region) >> PAGE_SHIFT);
+ if (ret)
+ return ret;
+
+ mem->chunk_size = PAGE_SIZE;
+ mem->max_order =
+ get_order(rounddown_pow_of_two(resource_size(&mem->region)));
+ mem->is_range_manager = true;
+ mem->priv_ops = &priv_ops;
+ mem->region_private = ttm_manager_type(bdev, mem_type);
+
+ return 0;
+ }
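
As an illustration only (the helper name and call site are made up), driver init code could pair the TTM device and per-region managers set up above like so:

/* Hypothetical init helper: one TTM device per i915 device, one range
 * manager per LMEM/MOCK region.
 */
static int example_ttm_setup(struct drm_i915_private *i915,
			     struct intel_memory_region *mem)
{
	int err = intel_region_ttm_device_init(i915);

	if (err)
		return err;

	err = intel_region_ttm_init(mem);
	if (err)
		intel_region_ttm_device_fini(i915);

	return err;
}
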
+
+ /**
+ * intel_region_ttm_fini - Finalize a TTM region.
+ * @mem: The memory region
+ *
+ * This function takes down the TTM resource manager associated with the
+ * memory region, and if it was registered with the TTM device,
+ * removes that registration.
+ */
+ void intel_region_ttm_fini(struct intel_memory_region *mem)
+ {
+ int ret;
+
+ ret = ttm_range_man_fini(&mem->i915->bdev,
+ intel_region_to_ttm_type(mem));
+ GEM_WARN_ON(ret);
+ mem->region_private = NULL;
+ }
+
+ /**
+ * intel_region_ttm_node_to_st - Convert an opaque TTM resource manager node
+ * to an sg_table.
+ * @mem: The memory region.
+ * @node: The resource manager node obtained from the TTM resource manager.
+ *
+ * The gem backends typically use sg-tables for operations on the underlying
+ * io_memory. So provide a way for the backends to translate the
+ * nodes they are handed from TTM to sg-tables.
+ *
+ * Return: A malloced sg_table on success, an error pointer on failure.
+ */
+ struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
- void *node)
++ struct ttm_resource *res)
+ {
- return i915_sg_from_mm_node(node, mem->region.start);
++ struct ttm_range_mgr_node *range_node =
++ container_of(res, typeof(*range_node), base);
++
++ GEM_WARN_ON(!mem->is_range_manager);
++ return i915_sg_from_mm_node(&range_node->mm_nodes[0],
++ mem->region.start);
+ }
+
+ /**
+ * intel_region_ttm_node_alloc - Allocate memory resources from a region
+ * @mem: The memory region.
+ * @size: The requested size in bytes
+ * @flags: Allocation flags
+ *
+ * This functionality is provided only for callers that need to allocate
+ * memory from standalone TTM range managers, without the TTM eviction
+ * functionality. Don't use if you are not completely sure that's the
+ * case. The returned opaque node can be converted to an sg_table using
+ * intel_region_ttm_node_to_st(), and can be freed using
+ * intel_region_ttm_node_free().
+ *
+ * Return: A valid pointer on success, an error pointer on failure.
+ */
-void *intel_region_ttm_node_alloc(struct intel_memory_region *mem,
- resource_size_t size,
- unsigned int flags)
++struct ttm_resource *
++intel_region_ttm_node_alloc(struct intel_memory_region *mem,
++ resource_size_t size,
++ unsigned int flags)
+ {
+ struct ttm_resource_manager *man = mem->region_private;
+ struct ttm_place place = {};
- struct ttm_resource res = {};
+ struct ttm_buffer_object mock_bo = {};
++ struct ttm_resource *res;
+ int ret;
+
+ /*
+ * We ignore the flags for now since we're using the range
+ * manager and contiguous and min page size would be fulfilled
+ * by default if size is min page size aligned.
+ */
- res.num_pages = size >> PAGE_SHIFT;
++ mock_bo.base.size = size;
+
+ if (mem->is_range_manager) {
+ if (size >= SZ_1G)
+ mock_bo.page_alignment = SZ_1G >> PAGE_SHIFT;
+ else if (size >= SZ_2M)
+ mock_bo.page_alignment = SZ_2M >> PAGE_SHIFT;
+ else if (size >= SZ_64K)
+ mock_bo.page_alignment = SZ_64K >> PAGE_SHIFT;
+ }
+
+ ret = man->func->alloc(man, &mock_bo, &place, &res);
+ if (ret == -ENOSPC)
+ ret = -ENXIO;
- return ret ? ERR_PTR(ret) : res.mm_node;
++ return ret ? ERR_PTR(ret) : res;
+ }
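
Finally, a usage sketch (not from the patch; the helper name, region, size and flags are made up) showing how a backend could combine the allocation helpers above:

/* Hypothetical backend helper: allocate 2 MiB from a region and map it. */
static struct sg_table *example_alloc_and_map(struct intel_memory_region *mem)
{
	struct ttm_resource *res;
	struct sg_table *st;

	res = intel_region_ttm_node_alloc(mem, SZ_2M, 0);
	if (IS_ERR(res))
		return ERR_CAST(res);

	st = intel_region_ttm_node_to_st(mem, res);
	if (IS_ERR(st))
		intel_region_ttm_node_free(mem, res);

	/* The caller frees 'res' with intel_region_ttm_node_free() when done. */
	return st;
}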