Use clflush on Intel hardware to flush cached objects.

Add a ttm_cache_flush hook to struct drm_bo_driver and call it from
drm_bind_ttm when a cached TTM is bound. The i915 implementation,
i915_flush_ttm, walks the TTM's pages and issues clflush one cacheline
at a time, bracketed by memory barriers; a local clflush inline is
provided for kernels older than 2.6.24, which lack one.
	uint32_t(*evict_mask) (struct drm_buffer_object *bo);
	int (*move) (struct drm_buffer_object * bo,
		     int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
+	void (*ttm_cache_flush)(struct drm_ttm *ttm);
};
	}
	return p;
}
+EXPORT_SYMBOL(drm_ttm_get_page);
int drm_ttm_populate(struct drm_ttm * ttm)
{
int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem)
{
-
+	struct drm_bo_driver *bo_driver = ttm->dev->driver->bo_driver;
	int ret = 0;
	struct drm_ttm_backend *be;
	if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) {
		drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
-	}
+	} else if ((bo_mem->flags & DRM_BO_FLAG_CACHED) &&
+		   bo_driver->ttm_cache_flush)
+		bo_driver->ttm_cache_flush(ttm);
	if ((ret = be->func->bind(be, bo_mem))) {
		ttm->state = ttm_evicted;
		return ret;
	}
	return 0;
}
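
To make the dispatch above easier to follow in isolation, here is a
condensed, compilable sketch of the same pattern: a driver vtable with
an optional cache-flush hook that the bind path invokes only for cached
placements (and skips when the driver leaves it NULL). The names below
(struct ttm, struct bo_driver, BO_FLAG_CACHED, bind_ttm, fake_flush)
are simplified stand-ins for illustration, not the real DRM types.

#include <stdio.h>

#define BO_FLAG_CACHED (1 << 0)	/* stand-in for DRM_BO_FLAG_CACHED */

struct ttm { int num_pages; };

struct bo_driver {
	/* Optional: only drivers needing an explicit CPU flush set this. */
	void (*ttm_cache_flush)(struct ttm *ttm);
};

static void bind_ttm(struct ttm *ttm, struct bo_driver *drv, unsigned flags)
{
	if (!(flags & BO_FLAG_CACHED)) {
		/* Uncached placement: change page caching attributes. */
	} else if (drv->ttm_cache_flush) {
		/* Cached placement: let the driver flush CPU caches. */
		drv->ttm_cache_flush(ttm);
	}
	/* ... hand the pages to the backend's bind() here ... */
}

static void fake_flush(struct ttm *ttm)
{
	printf("flushing %d pages\n", ttm->num_pages);
}

int main(void)
{
	struct ttm t = { .num_pages = 4 };
	struct bo_driver drv = { .ttm_cache_flush = fake_flush };

	bind_ttm(&t, &drv, BO_FLAG_CACHED);
	return 0;
}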
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
+static inline void clflush(volatile void *__p)
+{
+	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
+}
+#endif
+
+static inline void drm_cache_flush_addr(void *virt)
+{
+	int i;
+
+	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+		clflush(virt+i);
+}
+
+static inline void drm_cache_flush_page(struct page *p)
+{
+	drm_cache_flush_addr(page_address(p));
+}
+
+void i915_flush_ttm(struct drm_ttm *ttm)
+{
+	int i;
+
+	if (!ttm)
+		return;
+
+	DRM_MEMORYBARRIER();
+	for (i = ttm->num_pages-1; i >= 0; i--)
+		drm_cache_flush_page(drm_ttm_get_page(ttm, i));
+	DRM_MEMORYBARRIER();
+}
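
The flush itself is the standard x86 idiom: fence, walk the buffer in
cacheline-sized strides issuing clflush on each line, fence again so
the flushes are ordered against surrounding accesses. In the kernel the
stride comes from boot_cpu_data.x86_clflush_size; the user-space sketch
below hardcodes 64 bytes as an assumption and uses the SSE2 intrinsics
(_mm_clflush, _mm_mfence) in place of the inline assembly above. Build
with gcc -msse2.

#include <emmintrin.h>	/* _mm_clflush(), _mm_mfence() */
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE	4096
#define CLFLUSH_SIZE	64	/* assumed cacheline size; the kernel
				 * reads boot_cpu_data.x86_clflush_size */

/* Flush one page's worth of cachelines back to memory. */
static void cache_flush_addr(void *virt)
{
	char *p = virt;
	size_t i;

	for (i = 0; i < PAGE_SIZE; i += CLFLUSH_SIZE)
		_mm_clflush(p + i);
}

int main(void)
{
	void *buf = aligned_alloc(PAGE_SIZE, PAGE_SIZE);

	memset(buf, 0xa5, PAGE_SIZE);

	_mm_mfence();		/* order prior stores before the flush */
	cache_flush_addr(buf);
	_mm_mfence();		/* ensure the flushes have completed */

	free(buf);
	return 0;
}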
	.init_mem_type = i915_init_mem_type,
	.evict_mask = i915_evict_mask,
	.move = i915_move,
+	.ttm_cache_flush = i915_flush_ttm,
};
#endif
extern uint32_t i915_evict_mask(struct drm_buffer_object *bo);
extern int i915_move(struct drm_buffer_object *bo, int evict,
		     int no_wait, struct drm_bo_mem_reg *new_mem);
-
+extern void i915_flush_ttm(struct drm_ttm *ttm);
#endif
#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))