drm/ttm: Allow page allocations w/o triggering OOM.
author		Andrey Grodzovsky <andrey.grodzovsky@amd.com>
		Fri, 22 Dec 2017 13:12:40 +0000 (08:12 -0500)
committer	Alex Deucher <alexander.deucher@amd.com>
		Mon, 19 Feb 2018 19:17:58 +0000 (14:17 -0500)
This allows drivers to choose to avoid OOM invocation and handle
page allocation failures instead.

v2:
Remove extra new lines.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
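
With the new bdev->no_retry flag set, TTM page allocations use
__GFP_RETRY_MAYFAIL and fail with -ENOMEM instead of invoking the OOM
killer, so the driver must be prepared to handle that failure. A
minimal sketch of the opt-in (the init helper below is hypothetical,
not part of this patch):

	#include <drm/ttm/ttm_bo_driver.h>

	/* Hypothetical driver init path: opt out of OOM retries.  TTM
	 * will then set TTM_PAGE_FLAG_NO_RETRY on newly created objects,
	 * and page allocations return -ENOMEM on failure rather than
	 * triggering the OOM killer.
	 */
	static void example_driver_ttm_init(struct ttm_bo_device *bdev)
	{
		bdev->no_retry = true;
	}
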
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
drivers/gpu/drm/ttm/ttm_tt.c
include/drm/ttm/ttm_bo_driver.h

drivers/gpu/drm/ttm/ttm_bo.c
index 2fef09a..d33a6bb 100644
@@ -235,6 +235,9 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;
 
+       if (bdev->no_retry)
+               page_flags |= TTM_PAGE_FLAG_NO_RETRY;
+
        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
drivers/gpu/drm/ttm/ttm_page_alloc.c
index 2b12c55..c84da14 100644
@@ -741,6 +741,9 @@ out:
                if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
                        gfp_flags |= __GFP_ZERO;
 
+               if (ttm_flags & TTM_PAGE_FLAG_NO_RETRY)
+                       gfp_flags |= __GFP_RETRY_MAYFAIL;
+
                /* ttm_alloc_new_pages doesn't reference pool so we can run
                 * multiple requests in parallel.
                 **/
@@ -893,6 +896,9 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
                if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
                        gfp_flags |= __GFP_ZERO;
 
+               if (flags & TTM_PAGE_FLAG_NO_RETRY)
+                       gfp_flags |= __GFP_RETRY_MAYFAIL;
+
                if (flags & TTM_PAGE_FLAG_DMA32)
                        gfp_flags |= GFP_DMA32;
                else
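
For reference, __GFP_RETRY_MAYFAIL tells the page allocator to keep
retrying reclaim and compaction but to ultimately return failure rather
than invoke the OOM killer, so every caller must check the result. A
standalone sketch of the semantics (not part of this patch):

	#include <linux/gfp.h>

	/* With __GFP_RETRY_MAYFAIL the allocator retries reclaim, but on
	 * true memory exhaustion it returns NULL instead of OOM-killing a
	 * process; the caller propagates -ENOMEM instead.
	 */
	struct page *page = alloc_page(GFP_HIGHUSER | __GFP_RETRY_MAYFAIL);
	if (!page)
		return -ENOMEM;
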
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index a880515..9e90d0e 100644
@@ -920,6 +920,9 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
                gfp_flags &= ~__GFP_COMP;
        }
 
+       if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
+               gfp_flags |= __GFP_RETRY_MAYFAIL;
+
        return gfp_flags;
 }
 
drivers/gpu/drm/ttm/ttm_tt.c
index 5a046a3..9e4d43d 100644
@@ -301,7 +301,11 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
        swap_space = swap_storage->f_mapping;
 
        for (i = 0; i < ttm->num_pages; ++i) {
-               from_page = shmem_read_mapping_page(swap_space, i);
+               gfp_t gfp_mask = mapping_gfp_mask(swap_space);
+
+               gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
+               from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
+
                if (IS_ERR(from_page)) {
                        ret = PTR_ERR(from_page);
                        goto out_err;
@@ -350,10 +354,15 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
        swap_space = swap_storage->f_mapping;
 
        for (i = 0; i < ttm->num_pages; ++i) {
+               gfp_t gfp_mask = mapping_gfp_mask(swap_space);
+
+               gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
+
                from_page = ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;
-               to_page = shmem_read_mapping_page(swap_space, i);
+
+               to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
                if (IS_ERR(to_page)) {
                        ret = PTR_ERR(to_page);
                        goto out_err;
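
For context, shmem_read_mapping_page() is shmem_read_mapping_page_gfp()
called with the mapping's default gfp mask, so the change above only
widens that mask with __GFP_RETRY_MAYFAIL when TTM_PAGE_FLAG_NO_RETRY
is set. The in-tree helper (include/linux/shmem_fs.h) reads roughly:

	static inline struct page *shmem_read_mapping_page(
				struct address_space *mapping, pgoff_t index)
	{
		return shmem_read_mapping_page_gfp(mapping, index,
						   mapping_gfp_mask(mapping));
	}
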
include/drm/ttm/ttm_bo_driver.h
index 94064b1..9b417eb 100644
@@ -86,6 +86,7 @@ struct ttm_backend_func {
 #define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
 #define TTM_PAGE_FLAG_DMA32           (1 << 7)
 #define TTM_PAGE_FLAG_SG              (1 << 8)
+#define TTM_PAGE_FLAG_NO_RETRY       (1 << 9)
 
 enum ttm_caching_state {
        tt_uncached,
@@ -556,6 +557,7 @@ struct ttm_bo_global {
  * @dev_mapping: A pointer to the struct address_space representing the
  * device address space.
  * @wq: Work queue structure for the delayed delete workqueue.
+ * @no_retry: Don't retry allocation if it fails
  *
  */
 
@@ -592,6 +594,8 @@ struct ttm_bo_device {
        struct delayed_work wq;
 
        bool need_dma32;
+
+       bool no_retry;
 };
 
 /**