kfree(st);
return ret;
}
- gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
+ gtt_entries = (gen8_pte_t __iomem *)to_gt(dev_priv)->ggtt->gsm +
(fb_info->start >> PAGE_SHIFT);
for_each_sg(st->sgl, sg, page_num, i) {
dma_addr_t dma_addr =
intel_wakeref_t wakeref;
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
- swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
+ swizzle_string(to_gt(dev_priv)->ggtt->bit_6_swizzle_x));
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
- swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));
+ swizzle_string(to_gt(dev_priv)->ggtt->bit_6_swizzle_y));
if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
seq_puts(m, "L-shaped memory detected\n");
intel_suspend_hw(dev_priv);
- i915_ggtt_suspend(&dev_priv->ggtt);
+ i915_ggtt_suspend(to_gt(dev_priv)->ggtt);
i915_save_display(dev_priv);
if (ret)
drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
- i915_ggtt_resume(&dev_priv->ggtt);
+ i915_ggtt_resume(to_gt(dev_priv)->ggtt);
intel_dmc_ucode_resume(dev_priv);
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+ return to_gt(i915)->ggtt->bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
i915_gem_object_is_tiled(obj);
}
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct drm_i915_gem_get_aperture *args = data;
struct i915_vma *vma;
u64 pinned;
bool write)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct i915_vma *vma;
struct i915_gem_ww_ctx ww;
int ret;
struct i915_vma *vma)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
i915_gem_object_unpin_pages(obj);
if (drm_mm_node_allocated(node)) {
const struct drm_i915_gem_pread *args)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
intel_wakeref_t wakeref;
struct drm_mm_node node;
void __user *user_data;
const struct drm_i915_gem_pwrite *args)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t wakeref;
struct drm_mm_node node;
*/
list_for_each_entry_safe(obj, on,
- &i915->ggtt.userfault_list, userfault_link)
+ &to_gt(i915)->ggtt->userfault_list, userfault_link)
__i915_gem_object_release_mmap_gtt(obj);
/*
* in use by hardware (i.e. they are pinned), we should not be powering
* down! All other fences will be reacquired by the user upon waking.
*/
- for (i = 0; i < i915->ggtt.num_fences; i++) {
- struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
+ for (i = 0; i < to_gt(i915)->ggtt->num_fences; i++) {
+ struct i915_fence_reg *reg = &to_gt(i915)->ggtt->fence_regs[i];
/*
* Ideally we want to assert that the fence register is not
u64 size, u64 alignment, u64 flags)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct i915_vma *vma;
int ret;
/* Minimal basic recovery for KMS */
ret = i915_ggtt_enable_hw(dev_priv);
- i915_ggtt_resume(&dev_priv->ggtt);
+ i915_ggtt_resume(to_gt(dev_priv)->ggtt);
intel_init_clock_gating(dev_priv);
}
void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
- intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
+ intel_wakeref_auto_fini(&to_gt(dev_priv)->ggtt->userfault_wakeref);
i915_gem_suspend_late(dev_priv);
intel_gt_driver_remove(to_gt(dev_priv));
struct sg_table *pages)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
/* XXX This does not prevent more requests being submitted! */
if (unlikely(ggtt->do_idle_maps))
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
GEM_BUG_ON(range_overflows(offset, size, vm->total));
- GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
+ GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
node->size = size;
GEM_BUG_ON(start >= end);
GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
+ GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
if (unlikely(range_overflows(start, size, end)))
value = pdev->revision;
break;
case I915_PARAM_NUM_FENCES_AVAIL:
- value = i915->ggtt.num_fences;
+ value = to_gt(i915)->ggtt->num_fences;
break;
case I915_PARAM_HAS_OVERLAY:
value = !!i915->overlay;
struct drm_i915_gem_object *bo;
struct i915_vma *vma;
const u64 delay_ticks = 0xffffffffffffffff -
- intel_gt_ns_to_clock_interval(stream->perf->i915->ggtt.vm.gt,
- atomic64_read(&stream->perf->noa_programming_delay));
+ intel_gt_ns_to_clock_interval(to_gt(stream->perf->i915),
+ atomic64_read(&stream->perf->noa_programming_delay));
const u32 base = stream->engine->mmio_base;
#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
u32 *batch, *ts0, *cs, *jump;
static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
{
- return intel_gt_clock_interval_to_ns(perf->i915->ggtt.vm.gt,
+ return intel_gt_clock_interval_to_ns(to_gt(perf->i915),
2ULL << exponent);
}
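
The hunks above all rely on the same data-structure relationship: the GGTT is reached through the primary GT instead of being an embedded member of struct drm_i915_private. A minimal sketch of that layout and the to_gt() helper, for orientation only (members trimmed; treat the exact field names as illustrative and check them against the tree this series applies to):

struct intel_gt {
	struct i915_ggtt *ggtt;		/* GGTT now hangs off the GT */
	/* ... */
};

struct drm_i915_private {
	struct intel_gt gt0;		/* primary GT returned by to_gt() */
	/* ... */
};

static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
{
	return &i915->gt0;
}

/* old: &i915->ggtt (embedded struct); new: to_gt(i915)->ggtt (pointer) */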