drm/i915/ttm: fix 32b build
author	Matthew Auld <matthew.auld@intel.com>
	Tue, 12 Jul 2022 17:40:50 +0000 (18:40 +0100)
committer	Rodrigo Vivi <rodrigo.vivi@intel.com>
	Sun, 17 Jul 2022 18:53:47 +0000 (14:53 -0400)
Since segment_pages is no longer a compile-time constant, it looks like
DIV_ROUND_UP(node->size, segment_pages) breaks the 32b build. Simplest is
just to use the ULL variant, but really we should not need more than u32
for the page alignment (we are in any case limited to that by the
sg->length type), so also make it all u32.

Reported-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Fixes: aff1e0b09b54 ("drm/i915/ttm: fix sg_table construction")
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Nirmoy Das <nirmoy.das@linux.intel.com>
Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220712174050.592550-1-matthew.auld@intel.com
(cherry picked from commit 9306b2b2dfce6931241ef804783692cee526599c)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
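
For context, a minimal userspace C sketch (not part of the patch; the values
and the div_u64() stand-in are illustrative) of why the plain DIV_ROUND_UP()
trips up 32b targets once segment_pages is a runtime value: the open-coded
u64 division needs a libgcc helper that the kernel does not link against,
while DIV_ROUND_UP_ULL() routes the division through do_div()/div_u64().

/*
 * Standalone userspace sketch: macro bodies are simplified from
 * include/linux/math.h, example values are made up.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint32_t u32;

/*
 * Open-coded 64-bit '/': once the divisor is no longer a compile-time
 * constant, a 32b kernel build emits a call to the libgcc helper
 * (__udivdi3 / __aeabi_uldivmod), which the kernel does not provide,
 * so the link fails.
 */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/*
 * The ULL variant goes through the kernel's do_div()/div_u64() machinery,
 * which is implemented for 32b. Plain userspace '/' stands in for it here.
 */
static inline u64 div_u64_sketch(u64 dividend, u32 divisor)
{
	return dividend / divisor;
}
#define DIV_ROUND_UP_ULL(ll, d)	div_u64_sketch((u64)(ll) + (d) - 1, (d))

int main(void)
{
	u64 node_size = 0x200000000ULL;		/* pages; hypothetical */
	u32 page_alignment = 1u << 16;		/* hypothetical 64K alignment */
	/* round_down(UINT_MAX, page_alignment) for a power-of-two alignment */
	u32 max_segment = UINT32_MAX & ~(page_alignment - 1);
	u32 segment_pages = max_segment >> 12;	/* assumes PAGE_SHIFT == 12 */

	/*
	 * Fine in userspace, where libgcc supplies the helper; the same
	 * expression is what fails to link in a 32b kernel.
	 */
	(void)DIV_ROUND_UP(node_size, (u64)segment_pages);

	printf("sg entries needed: %llu\n",
	       (unsigned long long)DIV_ROUND_UP_ULL(node_size, segment_pages));
	return 0;
}
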
drivers/gpu/drm/i915/gem/i915_gem_region.c
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
drivers/gpu/drm/i915/i915_scatterlist.c
drivers/gpu/drm/i915/i915_scatterlist.h
drivers/gpu/drm/i915/intel_region_ttm.c
drivers/gpu/drm/i915/intel_region_ttm.h

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index f46ee16a323a98aa9191433b60a5df28c67416f5..a4fb577eceb412e3406ad9eb246f2b04f62c10a2 100644
@@ -60,6 +60,8 @@ __i915_gem_object_create_region(struct intel_memory_region *mem,
        if (page_size)
                default_page_size = page_size;
 
+       /* We should be able to fit a page within an sg entry */
+       GEM_BUG_ON(overflows_type(default_page_size, u32));
        GEM_BUG_ON(!is_power_of_2_u64(default_page_size));
        GEM_BUG_ON(default_page_size < PAGE_SIZE);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index d30ebcaec8b9f7e7004846046eb729b7e5b43e6e..8f1bb6a4b7d1fe71d63818c64e3e0a9fcaf94935 100644
@@ -620,7 +620,7 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
                         struct ttm_resource *res)
 {
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
-       u64 page_alignment;
+       u32 page_alignment;
 
        if (!i915_ttm_gtt_binds_lmem(res))
                return i915_ttm_tt_get_st(bo->ttm);
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c
index f63b50b71e10b88159e2db263c15d01c7343c59c..dcc081874ec8de27e2063b4dc4ab53820633fb57 100644
@@ -79,10 +79,10 @@ void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
  */
 struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
                                              u64 region_start,
-                                             u64 page_alignment)
+                                             u32 page_alignment)
 {
-       const u64 max_segment = round_down(UINT_MAX, page_alignment);
-       u64 segment_pages = max_segment >> PAGE_SHIFT;
+       const u32 max_segment = round_down(UINT_MAX, page_alignment);
+       const u32 segment_pages = max_segment >> PAGE_SHIFT;
        u64 block_size, offset, prev_end;
        struct i915_refct_sgt *rsgt;
        struct sg_table *st;
@@ -96,7 +96,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
 
        i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
        st = &rsgt->table;
-       if (sg_alloc_table(st, DIV_ROUND_UP(node->size, segment_pages),
+       if (sg_alloc_table(st, DIV_ROUND_UP_ULL(node->size, segment_pages),
                           GFP_KERNEL)) {
                i915_refct_sgt_put(rsgt);
                return ERR_PTR(-ENOMEM);
@@ -123,7 +123,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
                        st->nents++;
                }
 
-               len = min(block_size, max_segment - sg->length);
+               len = min_t(u64, block_size, max_segment - sg->length);
                sg->length += len;
                sg_dma_len(sg) += len;
 
@@ -155,11 +155,11 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
  */
 struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
                                                     u64 region_start,
-                                                    u64 page_alignment)
+                                                    u32 page_alignment)
 {
        struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
        const u64 size = res->num_pages << PAGE_SHIFT;
-       const u64 max_segment = round_down(UINT_MAX, page_alignment);
+       const u32 max_segment = round_down(UINT_MAX, page_alignment);
        struct drm_buddy *mm = bman_res->mm;
        struct list_head *blocks = &bman_res->blocks;
        struct drm_buddy_block *block;
@@ -207,7 +207,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
                                st->nents++;
                        }
 
-                       len = min(block_size, max_segment - sg->length);
+                       len = min_t(u64, block_size, max_segment - sg->length);
                        sg->length += len;
                        sg_dma_len(sg) += len;
 
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h
index b13e4cdea9238f4e3009e45bed086ee8b46f019c..9ddb3e743a3e517dfe5d4006c822d396cef9492d 100644
@@ -214,10 +214,10 @@ void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size);
 
 struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
                                              u64 region_start,
-                                             u64 page_alignment);
+                                             u32 page_alignment);
 
 struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
                                                     u64 region_start,
-                                                    u64 page_alignment);
+                                                    u32 page_alignment);
 
 #endif
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
index 6873808a70159e68b21e67f17b24b24a77e57362..575d67bc6ffede89d3adda423ee2df4dc635a6c6 100644
@@ -163,7 +163,7 @@ int intel_region_ttm_fini(struct intel_memory_region *mem)
 struct i915_refct_sgt *
 intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
                                  struct ttm_resource *res,
-                                 u64 page_alignment)
+                                 u32 page_alignment)
 {
        if (mem->is_range_manager) {
                struct ttm_range_mgr_node *range_node =
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.h b/drivers/gpu/drm/i915/intel_region_ttm.h
index 98fba5155619a6b5b871c682ab0db937c150e525..5bb8d8b582ae49c8e9238bcfb7a7e20876f57a13 100644
@@ -25,7 +25,7 @@ int intel_region_ttm_fini(struct intel_memory_region *mem);
 struct i915_refct_sgt *
 intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
                                  struct ttm_resource *res,
-                                 u64 page_alignment);
+                                 u32 page_alignment);
 
 void intel_region_ttm_resource_free(struct intel_memory_region *mem,
                                    struct ttm_resource *res);