void radeon_ms_irq_emit(struct drm_device *dev);
irqreturn_t radeon_ms_irq_handler(DRM_IRQ_ARGS);
void radeon_ms_irq_preinstall(struct drm_device * dev);
-void radeon_ms_irq_postinstall(struct drm_device * dev);
+int radeon_ms_irq_postinstall(struct drm_device * dev);
int radeon_ms_irq_init(struct drm_device *dev);
void radeon_ms_irq_restore(struct drm_device *dev, struct radeon_state *state);
void radeon_ms_irq_save(struct drm_device *dev, struct radeon_state *state);
struct radeon_pcie *pcie;
unsigned long page_first;
struct page **pages;
+ struct page *dummy_read_page;
unsigned long num_pages;
int populated;
int bound;
static void pcie_ttm_destroy(struct drm_ttm_backend *backend);
static int pcie_ttm_needs_ub_cache_adjust(struct drm_ttm_backend *backend);
static int pcie_ttm_populate(struct drm_ttm_backend *backend,
- unsigned long num_pages, struct page **pages);
+ unsigned long num_pages, struct page **pages,
+ struct page *dummy_read_page);
static int pcie_ttm_unbind(struct drm_ttm_backend *backend);
static struct drm_ttm_backend_func radeon_pcie_gart_ttm_backend =
for (i = 0, page = page_first; i < pcie_gart->num_pages; i++, page++) {
struct page *cur_page = pcie_gart->pages[i];
+
+	if (!cur_page) {
+		cur_page = pcie_gart->dummy_read_page;
+	}
/* write value */
page_base = page_to_phys(cur_page);
pcie_gart_set_page_base(pcie_gart->pcie, page, page_base);
}
static int pcie_ttm_populate(struct drm_ttm_backend *backend,
- unsigned long num_pages, struct page **pages)
+ unsigned long num_pages, struct page **pages,
+ struct page *dummy_read_page)
{
struct radeon_pcie_gart *pcie_gart;
radeon_ack_irqs(dev_priv, mask);
}
-void radeon_ms_irq_postinstall(struct drm_device * dev)
+int radeon_ms_irq_postinstall(struct drm_device * dev)
{
radeon_ms_irq_enable(dev);
+ return 0;
}
int radeon_ms_irq_init(struct drm_device *dev)