/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24 #include "mmhub_v1_8.h"
26 #include "mmhub/mmhub_1_8_0_offset.h"
27 #include "mmhub/mmhub_1_8_0_sh_mask.h"
28 #include "vega10_enum.h"
30 #include "soc15_common.h"
33 #define regVM_L2_CNTL3_DEFAULT 0x80100007
34 #define regVM_L2_CNTL4_DEFAULT 0x000000c1
36 static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev)
38 u64 base = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE);
39 u64 top = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP);
41 base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
44 top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
47 adev->gmc.fb_start = base;
48 adev->gmc.fb_end = top;
53 static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
54 uint64_t page_table_base)
56 struct amdgpu_vmhub *hub;
60 inst_mask = adev->aid_mask;
61 for_each_inst(i, inst_mask) {
62 hub = &adev->vmhub[AMDGPU_MMHUB0(i)];
63 WREG32_SOC15_OFFSET(MMHUB, i,
64 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
65 hub->ctx_addr_distance * vmid,
66 lower_32_bits(page_table_base));
68 WREG32_SOC15_OFFSET(MMHUB, i,
69 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
70 hub->ctx_addr_distance * vmid,
71 upper_32_bits(page_table_base));
75 static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
81 if (adev->gmc.pdb0_bo)
82 pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
84 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
86 mmhub_v1_8_setup_vm_pt_regs(adev, 0, pt_base);
88 /* If use GART for FB translation, vmid0 page table covers both
89 * vram and system memory (gart)
91 inst_mask = adev->aid_mask;
92 for_each_inst(i, inst_mask) {
93 if (adev->gmc.pdb0_bo) {
94 WREG32_SOC15(MMHUB, i,
95 regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
96 (u32)(adev->gmc.fb_start >> 12));
97 WREG32_SOC15(MMHUB, i,
98 regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
99 (u32)(adev->gmc.fb_start >> 44));
101 WREG32_SOC15(MMHUB, i,
102 regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
103 (u32)(adev->gmc.gart_end >> 12));
104 WREG32_SOC15(MMHUB, i,
105 regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
106 (u32)(adev->gmc.gart_end >> 44));
109 WREG32_SOC15(MMHUB, i,
110 regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
111 (u32)(adev->gmc.gart_start >> 12));
112 WREG32_SOC15(MMHUB, i,
113 regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
114 (u32)(adev->gmc.gart_start >> 44));
116 WREG32_SOC15(MMHUB, i,
117 regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
118 (u32)(adev->gmc.gart_end >> 12));
119 WREG32_SOC15(MMHUB, i,
120 regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
121 (u32)(adev->gmc.gart_end >> 44));
126 static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
128 uint32_t tmp, inst_mask;
132 inst_mask = adev->aid_mask;
133 for_each_inst(i, inst_mask) {
134 /* Program the AGP BAR */
135 WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BASE, 0);
136 WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT,
137 adev->gmc.agp_start >> 24);
138 WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP,
139 adev->gmc.agp_end >> 24);
141 if (amdgpu_sriov_vf(adev))
144 /* Program the system aperture low logical page number. */
145 WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
146 min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
148 WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
149 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
151 /* In the case squeezing vram into GART aperture, we don't use
152 * FB aperture and AGP aperture. Disable them.
154 if (adev->gmc.pdb0_bo) {
155 WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT, 0xFFFFFF);
156 WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP, 0);
157 WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_TOP, 0);
158 WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_BASE,
160 WREG32_SOC15(MMHUB, i,
161 regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
163 WREG32_SOC15(MMHUB, i,
164 regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
167 /* Set default page address. */
168 value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
169 WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
171 WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
174 /* Program "protection fault". */
175 WREG32_SOC15(MMHUB, i,
176 regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
177 (u32)(adev->dummy_page_addr >> 12));
178 WREG32_SOC15(MMHUB, i,
179 regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
180 (u32)((u64)adev->dummy_page_addr >> 44));
182 tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2);
183 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
184 ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
185 WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
189 static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
191 uint32_t tmp, inst_mask;
194 /* Setup TLB control */
195 inst_mask = adev->aid_mask;
196 for_each_inst(i, inst_mask) {
197 tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
199 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
201 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
202 SYSTEM_ACCESS_MODE, 3);
203 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
204 ENABLE_ADVANCED_DRIVER_MODEL, 1);
205 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
206 SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
207 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
208 MTYPE, MTYPE_UC);/* XXX for emulation. */
209 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
211 WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
215 static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev)
217 uint32_t tmp, inst_mask;
220 if (amdgpu_sriov_vf(adev))
224 inst_mask = adev->aid_mask;
225 for_each_inst(i, inst_mask) {
226 tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL);
227 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
228 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
229 ENABLE_L2_FRAGMENT_PROCESSING, 1);
230 /* XXX for emulation, Refer to closed source code.*/
231 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
232 L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
233 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION,
235 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
236 CONTEXT1_IDENTITY_ACCESS_MODE, 1);
237 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
238 IDENTITY_MODE_FRAGMENT_SIZE, 0);
239 WREG32_SOC15(MMHUB, i, regVM_L2_CNTL, tmp);
241 tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL2);
242 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS,
244 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
245 WREG32_SOC15(MMHUB, i, regVM_L2_CNTL2, tmp);
247 tmp = regVM_L2_CNTL3_DEFAULT;
248 if (adev->gmc.translate_further) {
249 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
250 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
251 L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
253 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
254 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
255 L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
257 WREG32_SOC15(MMHUB, i, regVM_L2_CNTL3, tmp);
259 tmp = regVM_L2_CNTL4_DEFAULT;
260 if (adev->gmc.xgmi.connected_to_cpu) {
261 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
262 VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
263 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
264 VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
266 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
267 VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
268 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
269 VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
271 WREG32_SOC15(MMHUB, i, regVM_L2_CNTL4, tmp);
275 static void mmhub_v1_8_enable_system_domain(struct amdgpu_device *adev)
277 uint32_t tmp, inst_mask;
280 inst_mask = adev->aid_mask;
281 for_each_inst(i, inst_mask) {
282 tmp = RREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL);
283 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
284 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
285 adev->gmc.vmid0_page_table_depth);
286 tmp = REG_SET_FIELD(tmp,
287 VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
288 adev->gmc.vmid0_page_table_block_size);
289 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
290 RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
291 WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL, tmp);
295 static void mmhub_v1_8_disable_identity_aperture(struct amdgpu_device *adev)
300 if (amdgpu_sriov_vf(adev))
303 inst_mask = adev->aid_mask;
304 for_each_inst(i, inst_mask) {
305 WREG32_SOC15(MMHUB, i,
306 regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
308 WREG32_SOC15(MMHUB, i,
309 regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
312 WREG32_SOC15(MMHUB, i,
313 regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
315 WREG32_SOC15(MMHUB, i,
316 regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
319 WREG32_SOC15(MMHUB, i,
320 regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
321 WREG32_SOC15(MMHUB, i,
322 regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
326 static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev)
328 struct amdgpu_vmhub *hub;
329 unsigned num_level, block_size;
330 uint32_t tmp, inst_mask;
333 num_level = adev->vm_manager.num_level;
334 block_size = adev->vm_manager.block_size;
335 if (adev->gmc.translate_further)
340 inst_mask = adev->aid_mask;
341 for_each_inst(j, inst_mask) {
342 hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
343 for (i = 0; i <= 14; i++) {
344 tmp = RREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL,
346 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
348 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
349 PAGE_TABLE_DEPTH, num_level);
350 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
351 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
352 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
353 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
354 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
355 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
356 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
357 VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
358 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
359 READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
360 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
361 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
362 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
363 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
364 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
365 PAGE_TABLE_BLOCK_SIZE,
367 /* On 9.4.3, XNACK can be enabled in the SQ
368 * per-process. Retry faults need to be enabled for
371 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
372 RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
373 WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL,
374 i * hub->ctx_distance, tmp);
375 WREG32_SOC15_OFFSET(MMHUB, j,
376 regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
377 i * hub->ctx_addr_distance, 0);
378 WREG32_SOC15_OFFSET(MMHUB, j,
379 regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
380 i * hub->ctx_addr_distance, 0);
381 WREG32_SOC15_OFFSET(MMHUB, j,
382 regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
383 i * hub->ctx_addr_distance,
384 lower_32_bits(adev->vm_manager.max_pfn - 1));
385 WREG32_SOC15_OFFSET(MMHUB, j,
386 regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
387 i * hub->ctx_addr_distance,
388 upper_32_bits(adev->vm_manager.max_pfn - 1));
393 static void mmhub_v1_8_program_invalidation(struct amdgpu_device *adev)
395 struct amdgpu_vmhub *hub;
398 inst_mask = adev->aid_mask;
399 for_each_inst(j, inst_mask) {
400 hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
401 for (i = 0; i < 18; ++i) {
402 WREG32_SOC15_OFFSET(MMHUB, j,
403 regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
404 i * hub->eng_addr_distance, 0xffffffff);
405 WREG32_SOC15_OFFSET(MMHUB, j,
406 regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
407 i * hub->eng_addr_distance, 0x1f);
412 static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
414 if (amdgpu_sriov_vf(adev)) {
416 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
417 * VF copy registers so vbios post doesn't program them, for
418 * SRIOV driver need to program them
420 WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE,
421 adev->gmc.vram_start >> 24);
422 WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP,
423 adev->gmc.vram_end >> 24);
427 mmhub_v1_8_init_gart_aperture_regs(adev);
428 mmhub_v1_8_init_system_aperture_regs(adev);
429 mmhub_v1_8_init_tlb_regs(adev);
430 mmhub_v1_8_init_cache_regs(adev);
432 mmhub_v1_8_enable_system_domain(adev);
433 mmhub_v1_8_disable_identity_aperture(adev);
434 mmhub_v1_8_setup_vmid_config(adev);
435 mmhub_v1_8_program_invalidation(adev);
440 static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
442 struct amdgpu_vmhub *hub;
446 /* Disable all tables */
447 inst_mask = adev->aid_mask;
448 for_each_inst(j, inst_mask) {
449 hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
450 for (i = 0; i < 16; i++)
451 WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT0_CNTL,
452 i * hub->ctx_distance, 0);
454 /* Setup TLB control */
455 tmp = RREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL);
456 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
458 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
459 ENABLE_ADVANCED_DRIVER_MODEL, 0);
460 WREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL, tmp);
462 if (!amdgpu_sriov_vf(adev)) {
464 tmp = RREG32_SOC15(MMHUB, j, regVM_L2_CNTL);
465 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE,
467 WREG32_SOC15(MMHUB, j, regVM_L2_CNTL, tmp);
468 WREG32_SOC15(MMHUB, j, regVM_L2_CNTL3, 0);
474 * mmhub_v1_8_set_fault_enable_default - update GART/VM fault handling
476 * @adev: amdgpu_device pointer
477 * @value: true redirects VM faults to the default page
479 static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool value)
484 if (amdgpu_sriov_vf(adev))
487 inst_mask = adev->aid_mask;
488 for_each_inst(i, inst_mask) {
489 tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL);
490 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
491 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
492 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
493 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
494 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
495 PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
496 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
497 PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
498 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
499 TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
501 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
502 NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
503 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
504 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
505 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
506 VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
507 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
508 READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
509 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
510 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
511 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
512 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
514 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
515 CRASH_ON_NO_RETRY_FAULT, 1);
516 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
517 CRASH_ON_RETRY_FAULT, 1);
520 WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL, tmp);
524 static void mmhub_v1_8_init(struct amdgpu_device *adev)
526 struct amdgpu_vmhub *hub;
530 inst_mask = adev->aid_mask;
531 for_each_inst(i, inst_mask) {
532 hub = &adev->vmhub[AMDGPU_MMHUB0(i)];
534 hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, i,
535 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
536 hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, i,
537 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
538 hub->vm_inv_eng0_req =
539 SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_REQ);
540 hub->vm_inv_eng0_ack =
541 SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_ACK);
542 hub->vm_context0_cntl =
543 SOC15_REG_OFFSET(MMHUB, i, regVM_CONTEXT0_CNTL);
544 hub->vm_l2_pro_fault_status = SOC15_REG_OFFSET(MMHUB, i,
545 regVM_L2_PROTECTION_FAULT_STATUS);
546 hub->vm_l2_pro_fault_cntl = SOC15_REG_OFFSET(MMHUB, i,
547 regVM_L2_PROTECTION_FAULT_CNTL);
549 hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL;
550 hub->ctx_addr_distance =
551 regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
552 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
553 hub->eng_distance = regVM_INVALIDATE_ENG1_REQ -
554 regVM_INVALIDATE_ENG0_REQ;
555 hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
556 regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
560 static int mmhub_v1_8_set_clockgating(struct amdgpu_device *adev,
561 enum amd_clockgating_state state)
566 static void mmhub_v1_8_get_clockgating(struct amdgpu_device *adev, u64 *flags)
571 const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
572 .get_fb_location = mmhub_v1_8_get_fb_location,
573 .init = mmhub_v1_8_init,
574 .gart_enable = mmhub_v1_8_gart_enable,
575 .set_fault_enable_default = mmhub_v1_8_set_fault_enable_default,
576 .gart_disable = mmhub_v1_8_gart_disable,
577 .setup_vm_pt_regs = mmhub_v1_8_setup_vm_pt_regs,
578 .set_clockgating = mmhub_v1_8_set_clockgating,
579 .get_clockgating = mmhub_v1_8_get_clockgating,