1 // SPDX-License-Identifier: MIT
3 * Copyright © 2020 Intel Corporation
7 #include "intel_context.h"
8 #include "intel_gpu_commands.h"
10 #include "intel_gtt.h"
11 #include "intel_migrate.h"
12 #include "intel_ring.h"
14 struct insert_pte_data {
18 #define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */
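/*
 * Illustrative arithmetic for the figure above: copying one CHUNK_SZ
 * chunk at the quoted blitter rate takes roughly
 * 8 MiB / (8 GiB/s) = 2^23 / 2^33 s ~= 0.98 ms, so a request can be
 * preempted at approximately millisecond granularity.
 */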
20 #define GET_CCS_BYTES(i915, size) (HAS_FLAT_CCS(i915) ? \
21 DIV_ROUND_UP(size, NUM_BYTES_PER_CCS_BYTE) : 0)
22 static bool engine_supports_migration(struct intel_engine_cs *engine)
28 * We need the ability to prevent arbitration (MI_ARB_ON_OFF),
29 * the ability to write PTEs using inline data (MI_STORE_DATA_IMM)
30 * and of course the ability to do the block transfer (blits).
32 GEM_BUG_ON(engine->class != COPY_ENGINE_CLASS);
37 static void xehpsdv_toggle_pdes(struct i915_address_space *vm,
38 struct i915_page_table *pt,
41 struct insert_pte_data *d = data;
44 * Insert a dummy PTE into every PT that will map to LMEM to ensure
45 * we have a correctly set-up PDE structure for later use.
47 vm->insert_page(vm, 0, d->offset, I915_CACHE_NONE, PTE_LM);
48 GEM_BUG_ON(!pt->is_compact);
52 static void xehpsdv_insert_pte(struct i915_address_space *vm,
53 struct i915_page_table *pt,
56 struct insert_pte_data *d = data;
59 * We are playing tricks here, since the actual pt, from the hw
60 * pov, is only 256 bytes with 32 entries, or 4096 bytes with 512
61 * entries, but we are still guaranteed that the physical
62 * alignment is 64K underneath for the pt, and we are careful
63 * not to access the space in the void.
65 vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE, PTE_LM);
69 static void insert_pte(struct i915_address_space *vm,
70 struct i915_page_table *pt,
73 struct insert_pte_data *d = data;
75 vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE,
76 i915_gem_object_is_lmem(pt->base) ? PTE_LM : 0);
77 d->offset += PAGE_SIZE;
80 static struct i915_address_space *migrate_vm(struct intel_gt *gt)
82 struct i915_vm_pt_stash stash = {};
83 struct i915_ppgtt *vm;
88 * We construct a very special VM for use by all migration contexts,
89 * it is kept pinned so that it can be used at any time. As we need
90 * to pre-allocate the page directories for the migration VM, this
91 * limits us to only using a small number of prepared vma.
93 * To be able to pipeline and reschedule migration operations while
94 * avoiding unnecessary contention on the vm itself, the PTE updates
95 * are inline with the blits. All the blits use the same fixed
96 * addresses, with the backing store redirection being updated on the
97 * fly. Only 2 implicit vma are used for all migration operations.
99 * We lay the ppGTT out as:
101 * [0, CHUNK_SZ) -> first object
102 * [CHUNK_SZ, 2 * CHUNK_SZ) -> second object
103 * [2 * CHUNK_SZ, 2 * CHUNK_SZ + (2 * CHUNK_SZ >> 9)] -> PTE
105 * By exposing the dma addresses of the page directories themselves
106 * within the ppGTT, we are then able to rewrite the PTE prior to use.
107 * But the PTE update and subsequent migration operation must be atomic,
108 * i.e. within the same non-preemptible window so that we do not switch
109 * to another migration context that overwrites the PTE.
111 * This changes quite a bit on platforms with HAS_64K_PAGES support,
112 * where we instead have three windows, each CHUNK_SZ in size. The
113 * first is reserved for mapping system memory, and that just uses the
114 * 512-entry layout using 4K GTT pages. The other two windows just map
115 * lmem pages and must use the new compact 32-entry layout using 64K GTT
116 * pages, which ensures we can address any lmem object that the user
117 * throws at us. We then also use xehpsdv_toggle_pdes() as a way of
118 * just toggling the PDE bit (GEN12_PDE_64K) for us, to enable the
119 * compact layout for each of these page-tables that fall within the
120 * [CHUNK_SZ, 3 * CHUNK_SZ) range.
122 * We lay the ppGTT out as:
124 * [0, CHUNK_SZ) -> first window/object, maps smem
125 * [CHUNK_SZ, 2 * CHUNK_SZ) -> second window/object, maps lmem src
126 * [2 * CHUNK_SZ, 3 * CHUNK_SZ) -> third window/object, maps lmem dst
128 * For the PTE window it's also quite different, since each PTE must
129 * point to some 64K page, one for each PT (since it's in lmem), and yet
130 * each is only <= 4096 bytes, but since the unused space within that PTE
131 * range is never touched, this should be fine.
133 * So basically each PT now needs 64K of virtual memory, instead of 4K,
136 * [3 * CHUNK_SZ, 3 * CHUNK_SZ + ((3 * CHUNK_SZ / SZ_2M) * SZ_64K)] -> PTE
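 *
 * As a worked illustration (following from CHUNK_SZ = SZ_8M above): in the
 * default layout the two windows span 16M of VA, so the PTE window needs
 * 16M / 4K * 8 bytes = 32K, i.e. 2 * CHUNK_SZ >> 9. In the HAS_64K_PAGES
 * layout the three windows span 24M and each 2M page-table consumes a
 * 64K-aligned slot, so the PTE window grows to (24M / 2M) * 64K = 768K.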
139 vm = i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY);
143 if (!vm->vm.allocate_va_range || !vm->vm.foreach) {
148 if (HAS_64K_PAGES(gt->i915))
149 stash.pt_sz = I915_GTT_PAGE_SIZE_64K;
152 * Each engine instance is assigned its own chunk in the VM, so
153 * that we can run multiple instances concurrently
155 for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
156 struct intel_engine_cs *engine;
157 u64 base = (u64)i << 32;
158 struct insert_pte_data d = {};
159 struct i915_gem_ww_ctx ww;
162 engine = gt->engine_class[COPY_ENGINE_CLASS][i];
163 if (!engine_supports_migration(engine))
167 * We copy in 8MiB chunks. Each PDE covers 2MiB, so we need
168 * 4x2 page directories for source/destination.
170 if (HAS_64K_PAGES(gt->i915))
174 d.offset = base + sz;
177 * We need another page directory setup so that we can write
178 * the 8x512 PTE in each chunk.
180 if (HAS_64K_PAGES(gt->i915))
181 sz += (sz / SZ_2M) * SZ_64K;
183 sz += (sz >> 12) * sizeof(u64);
185 err = i915_vm_alloc_pt_stash(&vm->vm, &stash, sz);
189 for_i915_gem_ww(&ww, err, true) {
190 err = i915_vm_lock_objects(&vm->vm, &ww);
193 err = i915_vm_map_pt_stash(&vm->vm, &stash);
197 vm->vm.allocate_va_range(&vm->vm, &stash, base, sz);
199 i915_vm_free_pt_stash(&vm->vm, &stash);
203 /* Now allow the GPU to rewrite the PTE via its own ppGTT */
204 if (HAS_64K_PAGES(gt->i915)) {
205 vm->vm.foreach(&vm->vm, base, d.offset - base,
206 xehpsdv_insert_pte, &d);
207 d.offset = base + CHUNK_SZ;
208 vm->vm.foreach(&vm->vm,
211 xehpsdv_toggle_pdes, &d);
213 vm->vm.foreach(&vm->vm, base, d.offset - base,
221 i915_vm_put(&vm->vm);
225 static struct intel_engine_cs *first_copy_engine(struct intel_gt *gt)
227 struct intel_engine_cs *engine;
230 for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
231 engine = gt->engine_class[COPY_ENGINE_CLASS][i];
232 if (engine_supports_migration(engine))
239 static struct intel_context *pinned_context(struct intel_gt *gt)
241 static struct lock_class_key key;
242 struct intel_engine_cs *engine;
243 struct i915_address_space *vm;
244 struct intel_context *ce;
246 engine = first_copy_engine(gt);
248 return ERR_PTR(-ENODEV);
254 ce = intel_engine_create_pinned_context(engine, vm, SZ_512K,
255 I915_GEM_HWS_MIGRATE,
261 int intel_migrate_init(struct intel_migrate *m, struct intel_gt *gt)
263 struct intel_context *ce;
265 memset(m, 0, sizeof(*m));
267 ce = pinned_context(gt);
275 static int random_index(unsigned int max)
277 return upper_32_bits(mul_u32_u32(get_random_u32(), max));
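	/*
	 * Illustrative note: mul_u32_u32() returns the full 64-bit product,
	 * so upper_32_bits(rand * max) computes (rand * max) >> 32, scaling
	 * a uniform 32-bit random value down into [0, max) without needing
	 * a division or modulo.
	 */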
280 static struct intel_context *__migrate_engines(struct intel_gt *gt)
282 struct intel_engine_cs *engines[MAX_ENGINE_INSTANCE];
283 struct intel_engine_cs *engine;
284 unsigned int count, i;
287 for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
288 engine = gt->engine_class[COPY_ENGINE_CLASS][i];
289 if (engine_supports_migration(engine))
290 engines[count++] = engine;
293 return intel_context_create(engines[random_index(count)]);
296 struct intel_context *intel_migrate_create_context(struct intel_migrate *m)
298 struct intel_context *ce;
301 * We randomly distribute contexts across the engines upon construction,
302 * as they all share the same pinned vm, and so in order to allow
303 * multiple blits to run in parallel, we must construct each blit
304 * to use a different range of the vm for its GTT. This has to be
305 * known at construction, so we can not use the late greedy load
306 * balancing of the virtual-engine.
308 ce = __migrate_engines(m->context->engine->gt);
313 ce->ring_size = SZ_256K;
316 ce->vm = i915_vm_get(m->context->vm);
321 static inline struct sgt_dma sg_sgt(struct scatterlist *sg)
323 dma_addr_t addr = sg_dma_address(sg);
325 return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
328 static int emit_no_arbitration(struct i915_request *rq)
332 cs = intel_ring_begin(rq, 2);
336 /* Explicitly disable preemption for this request. */
337 *cs++ = MI_ARB_ON_OFF;
339 intel_ring_advance(rq, cs);
344 static int emit_pte(struct i915_request *rq,
346 enum i915_cache_level cache_level,
351 bool has_64K_pages = HAS_64K_PAGES(rq->engine->i915);
352 const u64 encode = rq->context->vm->pte_encode(0, cache_level,
353 is_lmem ? PTE_LM : 0);
354 struct intel_ring *ring = rq->ring;
355 int pkt, dword_length;
360 GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8);
362 page_size = I915_GTT_PAGE_SIZE;
363 dword_length = 0x400;
365 /* Compute the page directory offset for the target address range */
367 GEM_BUG_ON(!IS_ALIGNED(offset, SZ_2M));
371 offset += 3 * CHUNK_SZ;
374 page_size = I915_GTT_PAGE_SIZE_64K;
379 offset *= sizeof(u64);
380 offset += 2 * CHUNK_SZ;
383 offset += (u64)rq->engine->instance << 32;
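	/*
	 * Worked example for the default (4K page) layout above: emitting
	 * PTEs for the destination window at VA CHUNK_SZ lands at qword
	 * index CHUNK_SZ / 4K into the PTE window, i.e. at
	 * 2 * CHUNK_SZ + (CHUNK_SZ >> 12) * 8 = 16M + 16K, then shifted into
	 * this engine's private 4G slice by the instance << 32 term.
	 */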
385 cs = intel_ring_begin(rq, 6);
389 /* Pack as many PTE updates as possible into a single MI command */
390 pkt = min_t(int, dword_length, ring->space / sizeof(u32) + 5);
391 pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
394 *cs++ = MI_STORE_DATA_IMM | REG_BIT(21); /* as qword elements */
395 *cs++ = lower_32_bits(offset);
396 *cs++ = upper_32_bits(offset);
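	/*
	 * A brief note on the structure (inferred from the fixups below):
	 * the three dwords above form the MI_STORE_DATA_IMM header (command
	 * plus a 64-bit store address); the qword PTE values follow, and
	 * once we know how many were emitted the length field in the command
	 * dword is patched via "*hdr += cs - hdr - 2".
	 */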
399 if (cs - hdr >= pkt) {
402 *hdr += cs - hdr - 2;
405 ring->emit = (void *)cs - ring->vaddr;
406 intel_ring_advance(rq, cs);
407 intel_ring_update_space(ring);
409 cs = intel_ring_begin(rq, 6);
413 dword_rem = dword_length;
415 if (IS_ALIGNED(total, SZ_2M)) {
416 offset = round_up(offset, SZ_64K);
418 dword_rem = SZ_2M - (total & (SZ_2M - 1));
419 dword_rem /= page_size;
424 pkt = min_t(int, dword_rem, ring->space / sizeof(u32) + 5);
425 pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
428 *cs++ = MI_STORE_DATA_IMM | REG_BIT(21);
429 *cs++ = lower_32_bits(offset);
430 *cs++ = upper_32_bits(offset);
433 GEM_BUG_ON(!IS_ALIGNED(it->dma, page_size));
435 *cs++ = lower_32_bits(encode | it->dma);
436 *cs++ = upper_32_bits(encode | it->dma);
441 it->dma += page_size;
442 if (it->dma >= it->max) {
443 it->sg = __sg_next(it->sg);
444 if (!it->sg || sg_dma_len(it->sg) == 0)
447 it->dma = sg_dma_address(it->sg);
448 it->max = it->dma + sg_dma_len(it->sg);
450 } while (total < length);
452 *hdr += cs - hdr - 2;
455 ring->emit = (void *)cs - ring->vaddr;
456 intel_ring_advance(rq, cs);
457 intel_ring_update_space(ring);
462 static bool wa_1209644611_applies(int ver, u32 size)
464 u32 height = size >> PAGE_SHIFT;
469 return height % 4 == 3 && height <= 8;
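/*
 * Observational note (not from the original comments): the "height" here
 * matches how emit_copy() below expresses a linear copy, i.e. as a 2D blit
 * of a PAGE_SIZE-wide, 32bpp surface with one row per page, so height is
 * simply size >> PAGE_SHIFT.
 */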
473 * DOC: Flat-CCS - Memory compression for Local memory
475 * On Xe-HP and later devices, we use dedicated compression control state (CCS)
476 * stored in local memory for each surface, to support the 3D and media
477 * compression formats.
479 * The memory required for the CCS of the entire local memory is 1/256 of the
480 * local memory size. So before the kernel boot, the required memory is reserved
481 * for the CCS data and a secure register will be programmed with the CCS base
484 * Flat CCS data needs to be cleared when a lmem object is allocated.
485 * And CCS data can be copied in and out of CCS region through
486 * XY_CTRL_SURF_COPY_BLT. CPU can't access the CCS data directly.
488 * i915 supports Flat-CCS on lmem-only objects. When an object has smem in
489 * its preference list, on memory pressure, i915 needs to migrate the lmem
490 * content into smem. If the lmem object is Flat-CCS compressed by userspace,
491 * then i915 needs to decompress it. But i915 lacks the required information
492 * for such decompression. Hence i915 supports Flat-CCS only on lmem-only objects.
494 * When we exhaust the lmem, Flat-CCS capable objects' lmem backing memory can
495 * be temporarily evicted to smem, along with the auxiliary CCS state, where
496 * it can be potentially swapped-out at a later point, if required.
497 * If userspace later touches the evicted pages, then we always move
498 * the backing memory back to lmem, which includes restoring the saved CCS state,
499 * and potentially performing any required swap-in.
501 * For the migration of lmem objects with smem in the placement list, such as
502 * {lmem, smem}, the objects are treated as non-Flat-CCS-capable objects.
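 *
 * As a concrete illustration of the 1/256 ratio above: every 1 GiB of lmem
 * is covered by just 4 MiB of CCS state.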
505 static inline u32 *i915_flush_dw(u32 *cmd, u32 flags)
507 *cmd++ = MI_FLUSH_DW | flags;
514 static u32 calc_ctrl_surf_instr_size(struct drm_i915_private *i915, int size)
516 u32 num_cmds, num_blks, total_size;
518 if (!GET_CCS_BYTES(i915, size))
522 * XY_CTRL_SURF_COPY_BLT transfers CCS in 256-byte
523 * blocks. One XY_CTRL_SURF_COPY_BLT command can
524 * transfer up to 1024 blocks.
526 num_blks = DIV_ROUND_UP(GET_CCS_BYTES(i915, size),
527 NUM_CCS_BYTES_PER_BLOCK);
528 num_cmds = DIV_ROUND_UP(num_blks, NUM_CCS_BLKS_PER_XFER);
529 total_size = XY_CTRL_SURF_INSTR_SIZE * num_cmds;
532 * Adding a flush before and after XY_CTRL_SURF_COPY_BLT
534 total_size += 2 * MI_FLUSH_DW_SIZE;
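	/*
	 * Worked example, assuming the 1/256 main-memory-to-CCS ratio from
	 * the Flat-CCS DOC comment: an 8M chunk carries 32K of CCS; at 256
	 * bytes per block that is 128 blocks, well under the 1024-block
	 * limit, so a single XY_CTRL_SURF_COPY_BLT plus the two flushes
	 * suffices.
	 */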
539 static int emit_copy_ccs(struct i915_request *rq,
540 u32 dst_offset, u8 dst_access,
541 u32 src_offset, u8 src_access, int size)
543 struct drm_i915_private *i915 = rq->engine->i915;
544 int mocs = rq->engine->gt->mocs.uc_index << 1;
545 u32 num_ccs_blks, ccs_ring_size;
548 ccs_ring_size = calc_ctrl_surf_instr_size(i915, size);
549 WARN_ON(!ccs_ring_size);
551 cs = intel_ring_begin(rq, round_up(ccs_ring_size, 2));
555 num_ccs_blks = DIV_ROUND_UP(GET_CCS_BYTES(i915, size),
556 NUM_CCS_BYTES_PER_BLOCK);
557 GEM_BUG_ON(num_ccs_blks > NUM_CCS_BLKS_PER_XFER);
558 cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
561 * The XY_CTRL_SURF_COPY_BLT instruction is used to copy the CCS
562 * data in and out of the CCS region.
564 * We can copy at most 1024 blocks of 256 bytes using one
565 * XY_CTRL_SURF_COPY_BLT instruction.
567 * In case we need to copy more than 1024 blocks, we need to add
568 * another instruction to the same batch buffer.
570 * 1024 blocks of 256 bytes of CCS represent a total 256KB of CCS.
572 * 256 KB of CCS represents 256 * 256 KB = 64 MB of LMEM.
574 *cs++ = XY_CTRL_SURF_COPY_BLT |
575 src_access << SRC_ACCESS_TYPE_SHIFT |
576 dst_access << DST_ACCESS_TYPE_SHIFT |
577 ((num_ccs_blks - 1) & CCS_SIZE_MASK) << CCS_SIZE_SHIFT;
579 *cs++ = rq->engine->instance |
580 FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);
582 *cs++ = rq->engine->instance |
583 FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);
585 cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
586 if (ccs_ring_size & 1)
589 intel_ring_advance(rq, cs);
594 static int emit_copy(struct i915_request *rq,
595 u32 dst_offset, u32 src_offset, int size)
597 const int ver = GRAPHICS_VER(rq->engine->i915);
598 u32 instance = rq->engine->instance;
601 cs = intel_ring_begin(rq, ver >= 8 ? 10 : 6);
605 if (ver >= 9 && !wa_1209644611_applies(ver, size)) {
606 *cs++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
607 *cs++ = BLT_DEPTH_32 | PAGE_SIZE;
609 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
616 } else if (ver >= 8) {
617 *cs++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
618 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
620 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
628 GEM_BUG_ON(instance);
629 *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
630 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
631 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE;
637 intel_ring_advance(rq, cs);
641 static int scatter_list_length(struct scatterlist *sg)
645 while (sg && sg_dma_len(sg)) {
646 len += sg_dma_len(sg);
654 calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
655 int *src_sz, u32 bytes_to_cpy, u32 ccs_bytes_to_cpy)
657 if (ccs_bytes_to_cpy) {
660 * When CHUNK_SZ is passed, all the pages up to CHUNK_SZ
661 * will be taken for the blt. On Flat-CCS capable
662 * platforms the smem object will have more pages than required
663 * for the main memory, hence limit it to the required size
666 *src_sz = min_t(int, bytes_to_cpy, CHUNK_SZ);
667 } else { /* ccs handling is not required */
672 static void get_ccs_sg_sgt(struct sgt_dma *it, u32 bytes_to_cpy)
677 GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg));
678 len = it->max - it->dma;
679 if (len > bytes_to_cpy) {
680 it->dma += bytes_to_cpy;
686 it->sg = __sg_next(it->sg);
687 it->dma = sg_dma_address(it->sg);
688 it->max = it->dma + sg_dma_len(it->sg);
689 } while (bytes_to_cpy);
693 intel_context_migrate_copy(struct intel_context *ce,
694 const struct i915_deps *deps,
695 struct scatterlist *src,
696 enum i915_cache_level src_cache_level,
698 struct scatterlist *dst,
699 enum i915_cache_level dst_cache_level,
701 struct i915_request **out)
703 struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs;
704 struct drm_i915_private *i915 = ce->engine->i915;
705 u32 ccs_bytes_to_cpy = 0, bytes_to_cpy;
706 enum i915_cache_level ccs_cache_level;
707 u32 src_offset, dst_offset;
708 u8 src_access, dst_access;
709 struct i915_request *rq;
711 bool ccs_is_src, overwrite_ccs;
714 GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
715 GEM_BUG_ON(IS_DGFX(ce->engine->i915) && (!src_is_lmem && !dst_is_lmem));
718 GEM_BUG_ON(ce->ring->size < SZ_64K);
720 src_sz = scatter_list_length(src);
721 bytes_to_cpy = src_sz;
723 if (HAS_FLAT_CCS(i915) && src_is_lmem ^ dst_is_lmem) {
724 src_access = !src_is_lmem && dst_is_lmem;
725 dst_access = !src_access;
727 dst_sz = scatter_list_length(dst);
730 ccs_cache_level = dst_cache_level;
732 } else if (dst_is_lmem) {
733 bytes_to_cpy = dst_sz;
735 ccs_cache_level = src_cache_level;
740 * When there is an eviction where CCS handling is needed, smem will have
741 * the extra pages for the ccs data
743 * TODO: Want to move the size mismatch check to a WARN_ON,
744 * but we still have some requests of smem->lmem with the same size.
747 ccs_bytes_to_cpy = src_sz != dst_sz ? GET_CCS_BYTES(i915, bytes_to_cpy) : 0;
748 if (ccs_bytes_to_cpy)
749 get_ccs_sg_sgt(&it_ccs, bytes_to_cpy);
752 overwrite_ccs = HAS_FLAT_CCS(i915) && !ccs_bytes_to_cpy && dst_is_lmem;
755 dst_offset = CHUNK_SZ;
756 if (HAS_64K_PAGES(ce->engine->i915)) {
760 src_offset = CHUNK_SZ;
762 dst_offset = 2 * CHUNK_SZ;
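	/*
	 * This mirrors the migrate_vm() layout described above: by default
	 * the source maps at [0, CHUNK_SZ) and the destination at
	 * [CHUNK_SZ, 2 * CHUNK_SZ); with HAS_64K_PAGES, smem stays in the
	 * first window while an lmem source/destination moves to the
	 * second/third window so that it uses the compact 64K layout.
	 */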
768 rq = i915_request_create(ce);
775 err = i915_request_await_deps(rq, deps);
779 if (rq->engine->emit_init_breadcrumb) {
780 err = rq->engine->emit_init_breadcrumb(rq);
788 /* The PTE updates + copy must not be interrupted. */
789 err = emit_no_arbitration(rq);
793 calculate_chunk_sz(i915, src_is_lmem, &src_sz,
794 bytes_to_cpy, ccs_bytes_to_cpy);
796 len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem,
807 err = emit_pte(rq, &it_dst, dst_cache_level, dst_is_lmem,
816 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
820 err = emit_copy(rq, dst_offset, src_offset, len);
826 if (ccs_bytes_to_cpy) {
829 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
833 ccs_sz = GET_CCS_BYTES(i915, len);
834 err = emit_pte(rq, &it_ccs, ccs_cache_level, false,
835 ccs_is_src ? src_offset : dst_offset,
844 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
848 err = emit_copy_ccs(rq, dst_offset, dst_access,
849 src_offset, src_access, len);
853 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
856 ccs_bytes_to_cpy -= ccs_sz;
857 } else if (overwrite_ccs) {
858 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
863 * While we can't always restore/manage the CCS state,
864 * we still need to ensure we don't leak the CCS state
865 * from the previous user, so make sure we overwrite it
868 err = emit_copy_ccs(rq, dst_offset, INDIRECT_ACCESS,
869 dst_offset, DIRECT_ACCESS, len);
873 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
878 /* Arbitration is re-enabled between requests. */
881 i915_request_put(*out);
882 *out = i915_request_get(rq);
883 i915_request_add(rq);
888 if (!bytes_to_cpy && !ccs_bytes_to_cpy) {
890 WARN_ON(it_src.sg && sg_dma_len(it_src.sg));
892 WARN_ON(it_dst.sg && sg_dma_len(it_dst.sg));
896 if (WARN_ON(!it_src.sg || !sg_dma_len(it_src.sg) ||
897 !it_dst.sg || !sg_dma_len(it_dst.sg) ||
898 (ccs_bytes_to_cpy && (!it_ccs.sg ||
899 !sg_dma_len(it_ccs.sg))))) {
911 static int emit_clear(struct i915_request *rq, u32 offset, int size,
912 u32 value, bool is_lmem)
914 struct drm_i915_private *i915 = rq->engine->i915;
915 int mocs = rq->engine->gt->mocs.uc_index << 1;
916 const int ver = GRAPHICS_VER(i915);
920 GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
922 if (HAS_FLAT_CCS(i915) && ver >= 12)
923 ring_sz = XY_FAST_COLOR_BLT_DW;
929 cs = intel_ring_begin(rq, ring_sz);
933 if (HAS_FLAT_CCS(i915) && ver >= 12) {
934 *cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
935 (XY_FAST_COLOR_BLT_DW - 2);
936 *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) |
939 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
941 *cs++ = rq->engine->instance;
942 *cs++ = !is_lmem << XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
955 } else if (ver >= 8) {
956 *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
957 *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
959 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
961 *cs++ = rq->engine->instance;
965 *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
966 *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
968 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
973 intel_ring_advance(rq, cs);
978 intel_context_migrate_clear(struct intel_context *ce,
979 const struct i915_deps *deps,
980 struct scatterlist *sg,
981 enum i915_cache_level cache_level,
984 struct i915_request **out)
986 struct drm_i915_private *i915 = ce->engine->i915;
987 struct sgt_dma it = sg_sgt(sg);
988 struct i915_request *rq;
992 GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
995 GEM_BUG_ON(ce->ring->size < SZ_64K);
998 if (HAS_64K_PAGES(i915) && is_lmem)
1004 rq = i915_request_create(ce);
1011 err = i915_request_await_deps(rq, deps);
1015 if (rq->engine->emit_init_breadcrumb) {
1016 err = rq->engine->emit_init_breadcrumb(rq);
1024 /* The PTE updates + clear must not be interrupted. */
1025 err = emit_no_arbitration(rq);
1029 len = emit_pte(rq, &it, cache_level, is_lmem, offset, CHUNK_SZ);
1035 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
1039 err = emit_clear(rq, offset, len, value, is_lmem);
1043 if (HAS_FLAT_CCS(i915) && is_lmem && !value) {
1045 * copy the content of memory into corresponding
1048 err = emit_copy_ccs(rq, offset, INDIRECT_ACCESS, offset,
1049 DIRECT_ACCESS, len);
1054 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
1056 /* Arbitration is re-enabled between requests. */
1059 i915_request_put(*out);
1060 *out = i915_request_get(rq);
1061 i915_request_add(rq);
1062 if (err || !it.sg || !sg_dma_len(it.sg))
1072 int intel_migrate_copy(struct intel_migrate *m,
1073 struct i915_gem_ww_ctx *ww,
1074 const struct i915_deps *deps,
1075 struct scatterlist *src,
1076 enum i915_cache_level src_cache_level,
1078 struct scatterlist *dst,
1079 enum i915_cache_level dst_cache_level,
1081 struct i915_request **out)
1083 struct intel_context *ce;
1090 ce = intel_migrate_create_context(m);
1092 ce = intel_context_get(m->context);
1093 GEM_BUG_ON(IS_ERR(ce));
1095 err = intel_context_pin_ww(ce, ww);
1099 err = intel_context_migrate_copy(ce, deps,
1100 src, src_cache_level, src_is_lmem,
1101 dst, dst_cache_level, dst_is_lmem,
1104 intel_context_unpin(ce);
1106 intel_context_put(ce);
1111 intel_migrate_clear(struct intel_migrate *m,
1112 struct i915_gem_ww_ctx *ww,
1113 const struct i915_deps *deps,
1114 struct scatterlist *sg,
1115 enum i915_cache_level cache_level,
1118 struct i915_request **out)
1120 struct intel_context *ce;
1127 ce = intel_migrate_create_context(m);
1129 ce = intel_context_get(m->context);
1130 GEM_BUG_ON(IS_ERR(ce));
1132 err = intel_context_pin_ww(ce, ww);
1136 err = intel_context_migrate_clear(ce, deps, sg, cache_level,
1137 is_lmem, value, out);
1139 intel_context_unpin(ce);
1141 intel_context_put(ce);
1145 void intel_migrate_fini(struct intel_migrate *m)
1147 struct intel_context *ce;
1149 ce = fetch_and_zero(&m->context);
1153 intel_engine_destroy_pinned_context(ce);
1156 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1157 #include "selftest_migrate.c"