/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_xcp.h"
#include "gfxhub_v1_2.h"
#include "gfxhub_v1_1.h"

#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"
#include "vega10_enum.h"

#include "soc15_common.h"

#define regVM_L2_CNTL3_DEFAULT	0x80100007
#define regVM_L2_CNTL4_DEFAULT	0x000000c1

static u64 gfxhub_v1_2_get_mc_fb_offset(struct amdgpu_device *adev)
{
	return (u64)RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_FB_OFFSET) << 24;
}

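/* Program the page table base address for @vmid on every GC instance
 * selected in @xcc_mask; regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_* is the
 * start of the per-context register array, indexed by ctx_addr_distance.
 */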
static void gfxhub_v1_2_xcc_setup_vm_pt_regs(struct amdgpu_device *adev,
					     uint32_t vmid,
					     uint64_t page_table_base,
					     uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	int i;

	for_each_inst(i, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(i)];
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, i),
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
				    hub->ctx_addr_distance * vmid,
				    lower_32_bits(page_table_base));

		WREG32_SOC15_OFFSET(GC, GET_INST(GC, i),
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
				    hub->ctx_addr_distance * vmid,
				    upper_32_bits(page_table_base));
	}
}

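/* The non-xcc wrappers below build a mask covering all NUM_XCC instances
 * and apply the per-XCC helper to every one of them.
 */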
static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev,
					 uint32_t vmid,
					 uint64_t page_table_base)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_setup_vm_pt_regs(adev, vmid, page_table_base, xcc_mask);
}

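/* Point VMID0 at PDB0 when VRAM is translated through GART
 * (adev->gmc.pdb0_bo is set), otherwise at the GART table, then program
 * the address range covered by the VMID0 page table.
 */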
static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev,
						    uint32_t xcc_mask)
{
	uint64_t pt_base;
	int i;

	if (adev->gmc.pdb0_bo)
		pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
	else
		pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	gfxhub_v1_2_xcc_setup_vm_pt_regs(adev, 0, pt_base, xcc_mask);

	/* If GART is used for FB translation, the vmid0 page table covers
	 * both VRAM and system memory (GART).
	 */
	for_each_inst(i, xcc_mask) {
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.fb_start >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.fb_start >> 44));

			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));
		} else {
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.gart_start >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.gart_start >> 44));

			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));
		}
	}
}

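/* Program the AGP aperture, the system aperture covering FB and AGP, the
 * default page for unmapped accesses and the protection fault page on
 * every selected GC instance.
 */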
static void
gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev,
					  uint32_t xcc_mask)
{
	uint64_t value;
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		/* Program the AGP BAR */
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_BASE, 0);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

		if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
			/* Program the system aperture low logical page number. */
			WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
				min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

			if (adev->apu_flags & AMD_APU_IS_RAVEN2)
				/*
				 * Raven2 has a HW issue that it is unable to use
				 * the vram which is out of
				 * MC_VM_SYSTEM_APERTURE_HIGH_ADDR. The workaround
				 * is to increase the system aperture high address
				 * (by 1) to get rid of the VM fault and hardware
				 * hang.
				 */
				WREG32_SOC15_RLC(GC, GET_INST(GC, i),
						 regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
						 max((adev->gmc.fb_end >> 18) + 0x1,
						     adev->gmc.agp_end >> 18));
			else
				WREG32_SOC15_RLC(GC, GET_INST(GC, i),
						 regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
						 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

			/* Set default page address. */
			value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
				     (u32)(value >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
				     (u32)(value >> 44));

			/* Program "protection fault". */
			WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
				     (u32)(adev->dummy_page_addr >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
				     (u32)((u64)adev->dummy_page_addr >> 44));

			tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL2);
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
					    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
			WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
		}

		/* When squeezing VRAM into the GART aperture, we don't use
		 * the FB aperture and the AGP aperture. Disable them.
		 */
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP, 0);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, 0);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_BOT, 0xFFFFFF);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
		}
	}
}

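/* Enable the L1 TLB and the advanced driver model on every selected GC
 * instance; system accesses use SYSTEM_ACCESS_MODE 3 and MTYPE_UC.
 */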
static void gfxhub_v1_2_xcc_init_tlb_regs(struct amdgpu_device *adev,
					  uint32_t xcc_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		/* Setup TLB control */
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_MX_L1_TLB_CNTL);

		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_L1_TLB, 1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_ACCESS_MODE, 3);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    MTYPE, MTYPE_UC);/* XXX for emulation. */
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_MX_L1_TLB_CNTL, tmp);
	}
}

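/* Program the VM L2 cache controls: enable the cache and fragment
 * processing, load the CNTL3/CNTL4 defaults, and pick the bank select and
 * big-K fragment size based on whether further translation is enabled.
 */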
static void gfxhub_v1_2_xcc_init_cache_regs(struct amdgpu_device *adev,
					    uint32_t xcc_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		/* Setup L2 cache */
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
		/* XXX for emulation, Refer to closed source code.*/
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
				    0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL, tmp);

		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CNTL2);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL2, tmp);

		tmp = regVM_L2_CNTL3_DEFAULT;
		if (adev->gmc.translate_further) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
		}
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL3, tmp);

		tmp = regVM_L2_CNTL4_DEFAULT;
		/* For AMD APP APUs setup WC memory */
		if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
		}
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL4, tmp);
	}
}

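/* Enable VM context 0 (the system domain) with the VMID0 page table depth
 * and block size; retry on permission/invalid-page faults stays disabled.
 */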
static void gfxhub_v1_2_xcc_enable_system_domain(struct amdgpu_device *adev,
						 uint32_t xcc_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
				    adev->gmc.vmid0_page_table_depth);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
				    adev->gmc.vmid0_page_table_block_size);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
		WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL, tmp);
	}
}

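/* Disable the CONTEXT1 identity aperture by programming an empty range
 * (low address above the high address) and a zero physical offset.
 */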
static void
gfxhub_v1_2_xcc_disable_identity_aperture(struct amdgpu_device *adev,
					  uint32_t xcc_mask)
{
	int i;

	for_each_inst(i, xcc_mask) {
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
			     0XFFFFFFFF);
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
			     0x0000000F);

		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
			     0);
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
			     0);

		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
	}
}

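/* Configure VM contexts 1..15: page table depth and block size, the
 * protection fault defaults, and an address range covering everything up
 * to vm_manager.max_pfn.
 */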
static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev,
					      uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	unsigned int num_level, block_size;
	uint32_t tmp;
	int i, j;

	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	for_each_inst(j, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(j)];
		for (i = 0; i <= 14; i++) {
			tmp = RREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL, i);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
					    num_level);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
					    1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PAGE_TABLE_BLOCK_SIZE,
					    block_size);
			/* Send no-retry XNACK on fault to suppress VM fault storm.
			 * On 9.4.2 and 9.4.3, XNACK can be enabled in
			 * the SQ per-process.
			 * Retry faults need to be enabled for that to work.
			 */
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
					    !adev->gmc.noretry ||
					    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
					    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3));
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL,
					    i * hub->ctx_distance, tmp);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
					    i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
					    i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
					    i * hub->ctx_addr_distance,
					    lower_32_bits(adev->vm_manager.max_pfn - 1));
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
					    i * hub->ctx_addr_distance,
					    upper_32_bits(adev->vm_manager.max_pfn - 1));
		}
	}
}

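/* Program the address range registers of all 18 VM invalidation engines
 * to their maximum span on every selected GC instance.
 */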
static void gfxhub_v1_2_xcc_program_invalidation(struct amdgpu_device *adev,
						 uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	unsigned int i, j;

	for_each_inst(j, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(j)];

		for (i = 0; i < 18; ++i) {
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
					    i * hub->eng_addr_distance, 0xffffffff);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
					    i * hub->eng_addr_distance, 0x1f);
		}
	}
}

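/* Bring up GART on the selected GC instances; cache and identity aperture
 * setup is skipped for SR-IOV VFs.
 */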
static int gfxhub_v1_2_xcc_gart_enable(struct amdgpu_device *adev,
				       uint32_t xcc_mask)
{
	int i;

	/*
	 * MC_VM_FB_LOCATION_BASE/TOP are NULL for a VF because they are
	 * VF copy registers that the VBIOS post does not program; under
	 * SR-IOV the driver needs to program them.
	 */
	if (amdgpu_sriov_vf(adev)) {
		for_each_inst(i, xcc_mask) {
			WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE,
					 adev->gmc.vram_start >> 24);
			WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP,
					 adev->gmc.vram_end >> 24);
		}
	}

	/* GART Enable. */
	gfxhub_v1_2_xcc_init_gart_aperture_regs(adev, xcc_mask);
	gfxhub_v1_2_xcc_init_system_aperture_regs(adev, xcc_mask);
	gfxhub_v1_2_xcc_init_tlb_regs(adev, xcc_mask);
	if (!amdgpu_sriov_vf(adev))
		gfxhub_v1_2_xcc_init_cache_regs(adev, xcc_mask);

	gfxhub_v1_2_xcc_enable_system_domain(adev, xcc_mask);
	if (!amdgpu_sriov_vf(adev))
		gfxhub_v1_2_xcc_disable_identity_aperture(adev, xcc_mask);
	gfxhub_v1_2_xcc_setup_vmid_config(adev, xcc_mask);
	gfxhub_v1_2_xcc_program_invalidation(adev, xcc_mask);

	return 0;
}

static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	return gfxhub_v1_2_xcc_gart_enable(adev, xcc_mask);
}

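/* Tear down GART on the selected GC instances: disable all 16 VM
 * contexts, then turn off the L1 TLB and the VM L2 cache.
 */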
static void gfxhub_v1_2_xcc_gart_disable(struct amdgpu_device *adev,
					 uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	u32 tmp;
	u32 i, j;

	for_each_inst(j, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(j)];
		/* Disable all tables */
		for (i = 0; i < 16; i++)
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT0_CNTL,
					    i * hub->ctx_distance, 0);

		/* Setup TLB control */
		tmp = RREG32_SOC15(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
		tmp = REG_SET_FIELD(tmp,
				    MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL,
				    0);
		WREG32_SOC15_RLC(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL, tmp);

		/* Setup L2 cache */
		tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
		WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp);
		WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0);
	}
}

static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_gart_disable(adev, xcc_mask);
}

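/* Apply @value as the default handling for every VM protection fault
 * class; when faults are not redirected (@value is false), also set the
 * CRASH_ON_NO_RETRY_FAULT and CRASH_ON_RETRY_FAULT bits.
 */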
static void gfxhub_v1_2_xcc_set_fault_enable_default(struct amdgpu_device *adev,
						     bool value,
						     uint32_t xcc_mask)
{
	u32 tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp,
				    VM_L2_PROTECTION_FAULT_CNTL,
				    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
				    value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		if (!value) {
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					    CRASH_ON_NO_RETRY_FAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					    CRASH_ON_RETRY_FAULT, 1);
		}
		WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL, tmp);
	}
}

/**
 * gfxhub_v1_2_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev,
						 bool value)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_set_fault_enable_default(adev, value, xcc_mask);
}

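/* Cache the per-hub register offsets and the strides between consecutive
 * context and invalidation engine registers for each selected GC instance.
 */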
static void gfxhub_v1_2_xcc_init(struct amdgpu_device *adev, uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	int i;

	for_each_inst(i, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(i)];

		hub->ctx0_ptb_addr_lo32 =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i),
					 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
		hub->ctx0_ptb_addr_hi32 =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i),
					 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
		hub->vm_inv_eng0_sem =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_SEM);
		hub->vm_inv_eng0_req =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_REQ);
		hub->vm_inv_eng0_ack =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_ACK);
		hub->vm_context0_cntl =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL);
		hub->vm_l2_pro_fault_status =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i),
					 regVM_L2_PROTECTION_FAULT_STATUS);
		hub->vm_l2_pro_fault_cntl =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL);

		hub->ctx_distance = regVM_CONTEXT1_CNTL -
				    regVM_CONTEXT0_CNTL;
		hub->ctx_addr_distance =
			regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
			regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
		hub->eng_distance = regVM_INVALIDATE_ENG1_REQ -
				    regVM_INVALIDATE_ENG0_REQ;
		hub->eng_addr_distance =
			regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
			regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
	}
}

static void gfxhub_v1_2_init(struct amdgpu_device *adev)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_init(adev, xcc_mask);
}

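/* Derive the XGMI topology (node count, this node's ID and the segment
 * size) from the MC_VM_XGMI_LFB_CNTL/SIZE registers.
 */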
static int gfxhub_v1_2_get_xgmi_info(struct amdgpu_device *adev)
{
	u32 max_num_physical_nodes;
	u32 max_physical_node_id;
	u32 xgmi_lfb_cntl;
	u32 max_region;
	u64 seg_size;

	xgmi_lfb_cntl = RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_XGMI_LFB_CNTL);
	seg_size = REG_GET_FIELD(
		RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_XGMI_LFB_SIZE),
		MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
	max_region =
		REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);

	max_num_physical_nodes = 8;
	max_physical_node_id = 7;

	/* PF_MAX_REGION=0 means xgmi is disabled */
	if (max_region || adev->gmc.xgmi.connected_to_cpu) {
		adev->gmc.xgmi.num_physical_nodes = max_region + 1;

		if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
			return -EINVAL;

		adev->gmc.xgmi.physical_node_id =
			REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL,
				      PF_LFB_REGION);

		if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
			return -EINVAL;

		adev->gmc.xgmi.node_segment_size = seg_size;
	}

	return 0;
}

const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
	.get_mc_fb_offset = gfxhub_v1_2_get_mc_fb_offset,
	.setup_vm_pt_regs = gfxhub_v1_2_setup_vm_pt_regs,
	.gart_enable = gfxhub_v1_2_gart_enable,
	.gart_disable = gfxhub_v1_2_gart_disable,
	.set_fault_enable_default = gfxhub_v1_2_set_fault_enable_default,
	.init = gfxhub_v1_2_init,
	.get_xgmi_info = gfxhub_v1_2_get_xgmi_info,
};

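/* XCP hooks: resume re-applies the VM fault handling policy and, on bare
 * metal, re-enables GART for the given instances; suspend disables it.
 */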
static int gfxhub_v1_2_xcp_resume(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_2_xcc_set_fault_enable_default(adev, value, inst_mask);

	if (!amdgpu_sriov_vf(adev))
		return gfxhub_v1_2_xcc_gart_enable(adev, inst_mask);

	return 0;
}

static int gfxhub_v1_2_xcp_suspend(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_sriov_vf(adev))
		gfxhub_v1_2_xcc_gart_disable(adev, inst_mask);

	return 0;
}

struct amdgpu_xcp_ip_funcs gfxhub_v1_2_xcp_funcs = {
	.suspend = &gfxhub_v1_2_xcp_suspend,
	.resume = &gfxhub_v1_2_xcp_resume
};