drm/amdgpu: stop using BO status for user pages
author    Christian König <christian.koenig@amd.com>
          Tue, 5 Sep 2017 12:30:05 +0000 (14:30 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
          Tue, 12 Sep 2017 18:24:09 +0000 (14:24 -0400)
Instead of checking the TTM binding state, use a counter to figure out whether we need to set new pages or not.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
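
As an illustration (not part of the patch itself), below is a minimal standalone C sketch of the counter scheme introduced here: the MMU notifier bumps mmu_invalidations, setting a fresh page array snapshots that value into last_set_pages, and "needs pages" is simply a mismatch between the two. The userptr_state struct and the userptr_* helpers are made-up names for illustration only; the two fields mirror the ones added to struct amdgpu_ttm_tt, and the sketch builds with a plain C11 compiler.

/* Standalone illustration of the invalidation-counter scheme. */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct userptr_state {
	atomic_uint mmu_invalidations;	/* bumped on every MMU invalidation */
	uint32_t    last_set_pages;	/* counter value when pages were last set */
};

/* MMU notifier path: the user mapping changed behind our back. */
static void userptr_invalidate(struct userptr_state *s)
{
	atomic_fetch_add(&s->mmu_invalidations, 1);
}

/* A fresh page array was installed: remember the counter value. */
static void userptr_set_pages(struct userptr_state *s)
{
	s->last_set_pages = atomic_load(&s->mmu_invalidations);
}

/* Replacement for the old "ttm->state != tt_bound" style check. */
static bool userptr_needs_pages(struct userptr_state *s)
{
	return atomic_load(&s->mmu_invalidations) != s->last_set_pages;
}

int main(void)
{
	struct userptr_state s;

	atomic_init(&s.mmu_invalidations, 0);
	s.last_set_pages = 0;

	userptr_invalidate(&s);		  /* mapping invalidated */
	assert(userptr_needs_pages(&s));  /* counter != snapshot: re-acquire pages */

	userptr_set_pages(&s);		  /* new page array installed */
	assert(!userptr_needs_pages(&s)); /* counters match again */
	return 0;
}
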

index cc6de0b..f3e5611 100644 (file)
@@ -1802,6 +1802,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
                                  unsigned long end);
 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
                                       int *last_invalidated);
+bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm);
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
                                 struct ttm_mem_reg *mem);
index 283a216..4d3f8fb 100644 (file)
@@ -473,7 +473,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                        return -EPERM;
 
                /* Check if we have user pages and nobody bound the BO already */
-               if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
+               if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
+                   lobj->user_pages) {
                        amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
                                                     lobj->user_pages);
                        binding_userptr = true;
@@ -534,23 +535,25 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                INIT_LIST_HEAD(&need_pages);
                for (i = p->bo_list->first_userptr;
                     i < p->bo_list->num_entries; ++i) {
+                       struct amdgpu_bo *bo;
 
                        e = &p->bo_list->array[i];
+                       bo = e->robj;
 
-                       if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
+                       if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
                                 &e->user_invalidated) && e->user_pages) {
 
                                /* We acquired a page array, but somebody
                                 * invalidated it. Free it and try again
                                 */
                                release_pages(e->user_pages,
-                                             e->robj->tbo.ttm->num_pages,
+                                             bo->tbo.ttm->num_pages,
                                              false);
                                kvfree(e->user_pages);
                                e->user_pages = NULL;
                        }
 
-                       if (e->robj->tbo.ttm->state != tt_bound &&
+                       if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
                            !e->user_pages) {
                                list_del(&e->tv.head);
                                list_add(&e->tv.head, &need_pages);
index ea0378c..e677851 100644 (file)
@@ -609,6 +609,7 @@ struct amdgpu_ttm_tt {
        spinlock_t              guptasklock;
        struct list_head        guptasks;
        atomic_t                mmu_invalidations;
+       uint32_t                last_set_pages;
        struct list_head        list;
 };
 
@@ -672,8 +673,10 @@ release_pages:
 
 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
+       struct amdgpu_ttm_tt *gtt = (void *)ttm;
        unsigned i;
 
+       gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
        for (i = 0; i < ttm->num_pages; ++i) {
                if (ttm->pages[i])
                        put_page(ttm->pages[i]);
@@ -1025,6 +1028,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
        spin_lock_init(&gtt->guptasklock);
        INIT_LIST_HEAD(&gtt->guptasks);
        atomic_set(&gtt->mmu_invalidations, 0);
+       gtt->last_set_pages = 0;
 
        return 0;
 }
@@ -1077,6 +1081,16 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
        return prev_invalidated != *last_invalidated;
 }
 
+bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
+{
+       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+       if (gtt == NULL || !gtt->userptr)
+               return false;
+
+       return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
+}
+
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
 {
        struct amdgpu_ttm_tt *gtt = (void *)ttm;