/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "amdgpu.h"
#include "mmhub_v1_8.h"

#include "mmhub/mmhub_1_8_0_offset.h"
#include "mmhub/mmhub_1_8_0_sh_mask.h"
#include "vega10_enum.h"

#include "soc15_common.h"
33 #define regVM_L2_CNTL3_DEFAULT 0x80100007
34 #define regVM_L2_CNTL4_DEFAULT 0x000000c1
36 static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev)
38 u64 base = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE);
39 u64 top = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP);
41 base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
44 top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
47 adev->gmc.fb_start = base;
48 adev->gmc.fb_end = top;
53 static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
54 uint64_t page_table_base)
56 struct amdgpu_vmhub *hub;
60 inst_mask = adev->aid_mask;
61 for_each_inst(i, inst_mask) {
62 hub = &adev->vmhub[AMDGPU_MMHUB0(i)];
63 WREG32_SOC15_OFFSET(MMHUB, i,
64 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
65 hub->ctx_addr_distance * vmid,
66 lower_32_bits(page_table_base));
68 WREG32_SOC15_OFFSET(MMHUB, i,
69 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
70 hub->ctx_addr_distance * vmid,
71 upper_32_bits(page_table_base));
75 static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
81 if (adev->gmc.pdb0_bo)
82 pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
84 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
86 mmhub_v1_8_setup_vm_pt_regs(adev, 0, pt_base);
88 /* If use GART for FB translation, vmid0 page table covers both
89 * vram and system memory (gart)
91 inst_mask = adev->aid_mask;
92 for_each_inst(i, inst_mask) {
93 if (adev->gmc.pdb0_bo) {
94 WREG32_SOC15(MMHUB, i,
95 regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
96 (u32)(adev->gmc.fb_start >> 12));
97 WREG32_SOC15(MMHUB, i,
98 regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
99 (u32)(adev->gmc.fb_start >> 44));
101 WREG32_SOC15(MMHUB, i,
102 regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
103 (u32)(adev->gmc.gart_end >> 12));
104 WREG32_SOC15(MMHUB, i,
105 regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
106 (u32)(adev->gmc.gart_end >> 44));
109 WREG32_SOC15(MMHUB, i,
110 regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
111 (u32)(adev->gmc.gart_start >> 12));
112 WREG32_SOC15(MMHUB, i,
113 regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
114 (u32)(adev->gmc.gart_start >> 44));
116 WREG32_SOC15(MMHUB, i,
117 regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
118 (u32)(adev->gmc.gart_end >> 12));
119 WREG32_SOC15(MMHUB, i,
120 regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
121 (u32)(adev->gmc.gart_end >> 44));
126 static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
128 uint32_t tmp, inst_mask;
132 inst_mask = adev->aid_mask;
133 for_each_inst(i, inst_mask) {
134 /* Program the AGP BAR */
135 WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BASE, 0);
136 WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT,
137 adev->gmc.agp_start >> 24);
138 WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP,
139 adev->gmc.agp_end >> 24);
141 if (amdgpu_sriov_vf(adev))
144 /* Program the system aperture low logical page number. */
145 WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
146 min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
148 WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
149 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
151 /* In the case squeezing vram into GART aperture, we don't use
152 * FB aperture and AGP aperture. Disable them.
154 if (adev->gmc.pdb0_bo) {
155 WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT, 0xFFFFFF);
156 WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP, 0);
157 WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_TOP, 0);
158 WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_BASE,
160 WREG32_SOC15(MMHUB, i,
161 regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
163 WREG32_SOC15(MMHUB, i,
164 regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
167 /* Set default page address. */
168 value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
169 WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
171 WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
174 /* Program "protection fault". */
175 WREG32_SOC15(MMHUB, i,
176 regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
177 (u32)(adev->dummy_page_addr >> 12));
178 WREG32_SOC15(MMHUB, i,
179 regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
180 (u32)((u64)adev->dummy_page_addr >> 44));
182 tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2);
183 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
184 ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
185 WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
189 static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
191 uint32_t tmp, inst_mask;
194 /* Setup TLB control */
195 inst_mask = adev->aid_mask;
196 for_each_inst(i, inst_mask) {
197 tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
199 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
201 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
202 SYSTEM_ACCESS_MODE, 3);
203 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
204 ENABLE_ADVANCED_DRIVER_MODEL, 1);
205 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
206 SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
207 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
208 MTYPE, MTYPE_UC);/* XXX for emulation. */
209 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
211 WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
215 static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev)
217 uint32_t tmp, inst_mask;
220 if (amdgpu_sriov_vf(adev))
224 inst_mask = adev->aid_mask;
225 for_each_inst(i, inst_mask) {
226 tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL);
227 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
228 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
229 ENABLE_L2_FRAGMENT_PROCESSING, 1);
230 /* XXX for emulation, Refer to closed source code.*/
231 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
232 L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
233 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION,
235 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
236 CONTEXT1_IDENTITY_ACCESS_MODE, 1);
237 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
238 IDENTITY_MODE_FRAGMENT_SIZE, 0);
239 WREG32_SOC15(MMHUB, i, regVM_L2_CNTL, tmp);
241 tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL2);
242 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS,
244 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
245 WREG32_SOC15(MMHUB, i, regVM_L2_CNTL2, tmp);
247 tmp = regVM_L2_CNTL3_DEFAULT;
248 if (adev->gmc.translate_further) {
249 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
250 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
251 L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
253 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
254 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
255 L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
257 WREG32_SOC15(MMHUB, i, regVM_L2_CNTL3, tmp);
259 tmp = regVM_L2_CNTL4_DEFAULT;
260 /* For AMD APP APUs setup WC memory */
261 if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) {
262 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
263 VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
264 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
265 VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
267 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
268 VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
269 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
270 VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
272 WREG32_SOC15(MMHUB, i, regVM_L2_CNTL4, tmp);
276 static void mmhub_v1_8_enable_system_domain(struct amdgpu_device *adev)
278 uint32_t tmp, inst_mask;
281 inst_mask = adev->aid_mask;
282 for_each_inst(i, inst_mask) {
283 tmp = RREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL);
284 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
285 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
286 adev->gmc.vmid0_page_table_depth);
287 tmp = REG_SET_FIELD(tmp,
288 VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
289 adev->gmc.vmid0_page_table_block_size);
290 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
291 RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
292 WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL, tmp);
296 static void mmhub_v1_8_disable_identity_aperture(struct amdgpu_device *adev)
301 if (amdgpu_sriov_vf(adev))
304 inst_mask = adev->aid_mask;
305 for_each_inst(i, inst_mask) {
306 WREG32_SOC15(MMHUB, i,
307 regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
309 WREG32_SOC15(MMHUB, i,
310 regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
313 WREG32_SOC15(MMHUB, i,
314 regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
316 WREG32_SOC15(MMHUB, i,
317 regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
320 WREG32_SOC15(MMHUB, i,
321 regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
322 WREG32_SOC15(MMHUB, i,
323 regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
327 static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev)
329 struct amdgpu_vmhub *hub;
330 unsigned num_level, block_size;
331 uint32_t tmp, inst_mask;
334 num_level = adev->vm_manager.num_level;
335 block_size = adev->vm_manager.block_size;
336 if (adev->gmc.translate_further)
341 inst_mask = adev->aid_mask;
342 for_each_inst(j, inst_mask) {
343 hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
344 for (i = 0; i <= 14; i++) {
345 tmp = RREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL,
347 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
349 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
350 PAGE_TABLE_DEPTH, num_level);
351 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
352 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
353 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
354 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
355 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
356 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
357 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
358 VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
359 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
360 READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
361 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
362 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
363 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
364 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
365 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
366 PAGE_TABLE_BLOCK_SIZE,
368 /* On 9.4.3, XNACK can be enabled in the SQ
369 * per-process. Retry faults need to be enabled for
372 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
373 RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
374 WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL,
375 i * hub->ctx_distance, tmp);
376 WREG32_SOC15_OFFSET(MMHUB, j,
377 regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
378 i * hub->ctx_addr_distance, 0);
379 WREG32_SOC15_OFFSET(MMHUB, j,
380 regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
381 i * hub->ctx_addr_distance, 0);
382 WREG32_SOC15_OFFSET(MMHUB, j,
383 regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
384 i * hub->ctx_addr_distance,
385 lower_32_bits(adev->vm_manager.max_pfn - 1));
386 WREG32_SOC15_OFFSET(MMHUB, j,
387 regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
388 i * hub->ctx_addr_distance,
389 upper_32_bits(adev->vm_manager.max_pfn - 1));
394 static void mmhub_v1_8_program_invalidation(struct amdgpu_device *adev)
396 struct amdgpu_vmhub *hub;
399 inst_mask = adev->aid_mask;
400 for_each_inst(j, inst_mask) {
401 hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
402 for (i = 0; i < 18; ++i) {
403 WREG32_SOC15_OFFSET(MMHUB, j,
404 regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
405 i * hub->eng_addr_distance, 0xffffffff);
406 WREG32_SOC15_OFFSET(MMHUB, j,
407 regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
408 i * hub->eng_addr_distance, 0x1f);
413 static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
415 if (amdgpu_sriov_vf(adev)) {
417 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
418 * VF copy registers so vbios post doesn't program them, for
419 * SRIOV driver need to program them
421 WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE,
422 adev->gmc.vram_start >> 24);
423 WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP,
424 adev->gmc.vram_end >> 24);
428 mmhub_v1_8_init_gart_aperture_regs(adev);
429 mmhub_v1_8_init_system_aperture_regs(adev);
430 mmhub_v1_8_init_tlb_regs(adev);
431 mmhub_v1_8_init_cache_regs(adev);
433 mmhub_v1_8_enable_system_domain(adev);
434 mmhub_v1_8_disable_identity_aperture(adev);
435 mmhub_v1_8_setup_vmid_config(adev);
436 mmhub_v1_8_program_invalidation(adev);
441 static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
443 struct amdgpu_vmhub *hub;
447 /* Disable all tables */
448 inst_mask = adev->aid_mask;
449 for_each_inst(j, inst_mask) {
450 hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
451 for (i = 0; i < 16; i++)
452 WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT0_CNTL,
453 i * hub->ctx_distance, 0);
455 /* Setup TLB control */
456 tmp = RREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL);
457 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
459 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
460 ENABLE_ADVANCED_DRIVER_MODEL, 0);
461 WREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL, tmp);
463 if (!amdgpu_sriov_vf(adev)) {
465 tmp = RREG32_SOC15(MMHUB, j, regVM_L2_CNTL);
466 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE,
468 WREG32_SOC15(MMHUB, j, regVM_L2_CNTL, tmp);
469 WREG32_SOC15(MMHUB, j, regVM_L2_CNTL3, 0);
/**
 * mmhub_v1_8_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
480 static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool value)
485 if (amdgpu_sriov_vf(adev))
488 inst_mask = adev->aid_mask;
489 for_each_inst(i, inst_mask) {
490 tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL);
491 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
492 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
493 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
494 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
495 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
496 PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
497 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
498 PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
499 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
500 TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
502 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
503 NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
504 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
505 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
506 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
507 VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
508 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
509 READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
510 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
511 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
512 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
513 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
515 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
516 CRASH_ON_NO_RETRY_FAULT, 1);
517 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
518 CRASH_ON_RETRY_FAULT, 1);
521 WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL, tmp);
525 static void mmhub_v1_8_init(struct amdgpu_device *adev)
527 struct amdgpu_vmhub *hub;
531 inst_mask = adev->aid_mask;
532 for_each_inst(i, inst_mask) {
533 hub = &adev->vmhub[AMDGPU_MMHUB0(i)];
535 hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, i,
536 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
537 hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, i,
538 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
539 hub->vm_inv_eng0_req =
540 SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_REQ);
541 hub->vm_inv_eng0_ack =
542 SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_ACK);
543 hub->vm_context0_cntl =
544 SOC15_REG_OFFSET(MMHUB, i, regVM_CONTEXT0_CNTL);
545 hub->vm_l2_pro_fault_status = SOC15_REG_OFFSET(MMHUB, i,
546 regVM_L2_PROTECTION_FAULT_STATUS);
547 hub->vm_l2_pro_fault_cntl = SOC15_REG_OFFSET(MMHUB, i,
548 regVM_L2_PROTECTION_FAULT_CNTL);
550 hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL;
551 hub->ctx_addr_distance =
552 regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
553 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
554 hub->eng_distance = regVM_INVALIDATE_ENG1_REQ -
555 regVM_INVALIDATE_ENG0_REQ;
556 hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
557 regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
561 static int mmhub_v1_8_set_clockgating(struct amdgpu_device *adev,
562 enum amd_clockgating_state state)
567 static void mmhub_v1_8_get_clockgating(struct amdgpu_device *adev, u64 *flags)
572 const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
573 .get_fb_location = mmhub_v1_8_get_fb_location,
574 .init = mmhub_v1_8_init,
575 .gart_enable = mmhub_v1_8_gart_enable,
576 .set_fault_enable_default = mmhub_v1_8_set_fault_enable_default,
577 .gart_disable = mmhub_v1_8_gart_disable,
578 .setup_vm_pt_regs = mmhub_v1_8_setup_vm_pt_regs,
579 .set_clockgating = mmhub_v1_8_set_clockgating,
580 .get_clockgating = mmhub_v1_8_get_clockgating,