 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_mem *node = old_mem->mm_node;
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	u64 length = (new_mem->num_pages << PAGE_SHIFT);
 	u64 src_offset = node->vma[0].offset;
 	u64 dst_offset = node->vma[1].offset;
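+	/* treat a surface as tiled whenever its nouveau_mem node carries a
+	 * non-zero memtype, rather than inferring it from the bo's placement */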
+	int src_tiled = !!node->memtype;
+	int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
 	int ret;
 
 	while (length) {
 		u32 amount, stride, height;
 
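+		/* reserve the whole iteration up front: 18 dwords for a
+		 * linear<->linear copy plus 6 extra per tiled surface */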
+		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
+		if (ret)
+			return ret;
+
 		amount = min(length, (u64)(4 * 1024 * 1024));
 		stride = 16 * 4;
 		height = amount / stride;
 
-		if (old_mem->mem_type == TTM_PL_VRAM &&
-		    nouveau_bo_tile_layout(nvbo)) {
-			ret = RING_SPACE(chan, 8);
-			if (ret)
-				return ret;
-
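+		/* tiled source: emit the full 7-dword surface description;
+		 * a linear source only needs the single flag below */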
+		if (src_tiled) {
 			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
 			OUT_RING (chan, 0);
 			OUT_RING (chan, 0);
 			OUT_RING (chan, stride);
 			OUT_RING (chan, height);
 			OUT_RING (chan, 1);
 			OUT_RING (chan, 0);
 			OUT_RING (chan, 0);
 		} else {
-			ret = RING_SPACE(chan, 2);
-			if (ret)
-				return ret;
-
 			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
 			OUT_RING (chan, 1);
 		}
-		if (new_mem->mem_type == TTM_PL_VRAM &&
-		    nouveau_bo_tile_layout(nvbo)) {
-			ret = RING_SPACE(chan, 8);
-			if (ret)
-				return ret;
-
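+		/* same idea for the destination surface: full description when
+		 * tiled, a single flag when linear */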
+		if (dst_tiled) {
 			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
 			OUT_RING (chan, 0);
 			OUT_RING (chan, 0);
 			OUT_RING (chan, stride);
 			OUT_RING (chan, height);
 			OUT_RING (chan, 1);
 			OUT_RING (chan, 0);
 			OUT_RING (chan, 0);
 		} else {
-			ret = RING_SPACE(chan, 2);
-			if (ret)
-				return ret;
-
 			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
 			OUT_RING (chan, 1);
 		}
 
-		ret = RING_SPACE(chan, 14);
-		if (ret)
-			return ret;
-
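+		/* high 32 bits of the 64-bit source and destination offsets */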
 		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
 		OUT_RING (chan, upper_32_bits(src_offset));
 		OUT_RING (chan, upper_32_bits(dst_offset));