--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ ... @@
 static void
 i915_ttm_place_from_region(const struct intel_memory_region *mr,
 			   struct ttm_place *place,
+			   resource_size_t offset,
+			   resource_size_t size,
 			   unsigned int flags)
 {
 	memset(place, 0, sizeof(*place));
@@ ... @@ i915_ttm_place_from_region
 	if (flags & I915_BO_ALLOC_CONTIGUOUS)
 		place->flags |= TTM_PL_FLAG_CONTIGUOUS;
-	if (mr->io_size && mr->io_size < mr->total) {
+	if (offset != I915_BO_INVALID_OFFSET) {
+		place->fpfn = offset >> PAGE_SHIFT;
+		place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
+	} else if (mr->io_size && mr->io_size < mr->total) {
 		if (flags & I915_BO_ALLOC_GPU_ONLY) {
 			place->flags |= TTM_PL_FLAG_TOPDOWN;
 		} else {
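The fpfn/lpfn pair is what actually pins the buffer: the TTM allocator will only consider page frames in [fpfn, lpfn), so a page-aligned (offset, size) request maps to exactly one placement window. Below is a minimal userspace sketch of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT = 12) and a stand-in struct for struct ttm_place; it illustrates the math above and is not part of the patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12			/* assumption: 4 KiB pages */
#define PAGE_SIZE  (1ull << PAGE_SHIFT)

/* Stand-in for the fpfn/lpfn fields of struct ttm_place. */
struct place {
	uint64_t fpfn;	/* first allowed page frame (inclusive) */
	uint64_t lpfn;	/* end of the allowed window (exclusive) */
};

/* Mirrors the fixed-offset branch above; offset and size are page aligned. */
static void place_from_offset(struct place *p, uint64_t offset, uint64_t size)
{
	assert(!(offset & (PAGE_SIZE - 1)) && !(size & (PAGE_SIZE - 1)));
	p->fpfn = offset >> PAGE_SHIFT;
	p->lpfn = p->fpfn + (size >> PAGE_SHIFT);
}

int main(void)
{
	struct place p;

	/* A 1 MiB object fixed at offset 8 MiB gets page frames [2048, 2304). */
	place_from_offset(&p, 8ull << 20, 1ull << 20);
	printf("fpfn=%llu lpfn=%llu\n",
	       (unsigned long long)p.fpfn, (unsigned long long)p.lpfn);
	return 0;
}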
@@ ... @@ i915_ttm_placement_from_obj
 	placement->num_placement = 1;
 	i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
-				   obj->mm.region, requested, flags);
+				   obj->mm.region, requested, obj->bo_offset,
+				   obj->base.size, flags);
 	/* Cache this on object? */
 	placement->num_busy_placement = num_allowed;
 	for (i = 0; i < placement->num_busy_placement; ++i)
-		i915_ttm_place_from_region(obj->mm.placements[i], busy + i, flags);
+		i915_ttm_place_from_region(obj->mm.placements[i], busy + i,
+					   obj->bo_offset, obj->base.size, flags);
 	if (num_allowed == 0) {
 		*busy = *requested;
@@ ... @@ __i915_ttm_migrate
 	struct ttm_placement placement;
 	int ret;
-	i915_ttm_place_from_region(mr, &requested, flags);
+	i915_ttm_place_from_region(mr, &requested, obj->bo_offset,
+				   obj->base.size, flags);
 	placement.num_placement = 1;
 	placement.num_busy_placement = 1;
 	placement.placement = &requested;
@@ ... @@ __i915_gem_ttm_object_init
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
 	i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
+	obj->bo_offset = offset;
+
 	/* Don't put on a region list until we're either locked or fully initialized. */
 	obj->mm.region = mem;
 	INIT_LIST_HEAD(&obj->mm.region_link);
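With bo_offset captured at object-init time and threaded through every placement computation above, a caller can pin a buffer at a known byte offset inside a region. A sketch of the caller side, assuming an i915_gem_object_create_region_at() helper along the lines of what the wider series introduces (that helper's name and signature are an assumption here, not shown in these hunks):

/* Sketch only, not part of these hunks. */
static struct drm_i915_gem_object *
reserve_range_at(struct intel_memory_region *mem,
		 resource_size_t offset, resource_size_t size)
{
	/*
	 * The offset lands in obj->bo_offset and, via
	 * i915_ttm_place_from_region(), in place->fpfn/lpfn, so TTM can only
	 * satisfy the allocation with pages at exactly [offset, offset + size).
	 */
	return i915_gem_object_create_region_at(mem, offset, size, 0);
}

Callers that do not care about placement keep passing I915_BO_INVALID_OFFSET, so they fall through to the existing io_size-based logic unchanged.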
#include "intel_region_ttm.h"
+#include "gem/i915_gem_region.h" /* for I915_BO_INVALID_OFFSET */
#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
/**
* DOC: TTM support structure
*/
@@ ... @@
 struct ttm_resource *
 intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
+				resource_size_t offset,
 				resource_size_t size,
 				unsigned int flags)
 {
@@ ... @@ intel_region_ttm_resource_alloc
 	if (flags & I915_BO_ALLOC_CONTIGUOUS)
 		place.flags |= TTM_PL_FLAG_CONTIGUOUS;
-	if (mem->io_size && mem->io_size < mem->total) {
+	if (offset != I915_BO_INVALID_OFFSET) {
+		place.fpfn = offset >> PAGE_SHIFT;
+		place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
+	} else if (mem->io_size && mem->io_size < mem->total) {
 		if (flags & I915_BO_ALLOC_GPU_ONLY) {
 			place.flags |= TTM_PL_FLAG_TOPDOWN;
 		} else {
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ ... @@ mock_region_get_pages
 	int err;
 	obj->mm.res = intel_region_ttm_resource_alloc(obj->mm.region,
+						      obj->bo_offset,
 						      obj->base.size,
 						      obj->flags);
 	if (IS_ERR(obj->mm.res))
@@ ... @@ mock_object_init
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
 	i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class, flags);
+	obj->bo_offset = offset;
+
 	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
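One detail worth noting: offset 0 is a perfectly legal fixed placement, so "no offset requested" needs a sentinel that can never be a real offset, which is why the code compares against I915_BO_INVALID_OFFSET instead of testing for zero. A toy model of the branch order shared by both placement helpers, assuming the sentinel is the all-ones value (resource_size_t)-1 (an assumption; its definition is not shown in these hunks):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t resource_size_t;		/* userspace stand-in */
#define BO_INVALID_OFFSET ((resource_size_t)-1)	/* models I915_BO_INVALID_OFFSET */

/* Models the if/else-if ordering used by both placement helpers above. */
static const char *placement_rule(resource_size_t offset,
				  resource_size_t io_size,
				  resource_size_t total)
{
	if (offset != BO_INVALID_OFFSET)
		return "fixed fpfn/lpfn window";	/* new fixed-offset path */
	else if (io_size && io_size < total)
		return "constrained to CPU-visible io_size";
	return "anywhere in the region";
}

int main(void)
{
	/* offset 0 is a valid fixed placement, not "don't care" */
	printf("%s\n", placement_rule(0, 256ull << 20, 1ull << 30));
	printf("%s\n", placement_rule(BO_INVALID_OFFSET, 256ull << 20, 1ull << 30));
	return 0;
}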