/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/module.h>

#ifdef CONFIG_X86
#include <asm/hypervisor.h>
#endif

#include <drm/drm_drv.h>
#include <xen/xen.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
	do { \
		vf2pf_info->ucode_info[ucode].id = ucode; \
		vf2pf_info->ucode_info[ucode].version = ver; \
	} while (0)
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/* By now all MMIO pages except mailbox are blocked
	 * if blocking is enabled in hypervisor. Choose
	 * SCRATCH_REG0 to test.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);

	/* enable virtual display */
	if (adev->asic_type != CHIP_ALDEBARAN &&
	    adev->asic_type != CHIP_ARCTURUS &&
	    ((adev->pdev->class >> 8) != PCI_CLASS_ACCELERATOR_PROCESSING)) {
		if (adev->mode_info.num_crtc == 0)
			adev->mode_info.num_crtc = 1;
		adev->enable_virtual_display = true;
	}
	ddev->driver_features &= ~DRIVER_ATOMIC;
	adev->cg_flags = 0;
	adev->pg_flags = 0;

	/* enable mcbp for sriov */
	amdgpu_mcbp = 1;

	/* Reduce kcq number to 2 to reduce latency */
	if (amdgpu_num_kcq == -1)
		amdgpu_num_kcq = 2;
}
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
	struct amdgpu_ring *ring = &kiq->ring;
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;

	if (adev->mes.ring.sched.ready) {
		amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
					      ref, mask);
		return;
	}

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
					    ref, mask);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore for IRQ context */
	if (r < 1 && in_interrupt())
		goto failed_kiq;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq:
	dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
}
/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 * When starting driver init/fini, full GPU access must be requested first.
 * Return: Zero on success, or a negative error code otherwise.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}
/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 * When finishing driver init/fini, full GPU access must be released.
 * Return: Zero on success, or a negative error code otherwise.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}
/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev: amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM is using.
 * Return: Zero on success, or a negative error code otherwise.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}
void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (virt->ops && virt->ops->req_init_data)
		virt->ops->req_init_data(adev);

	if (adev->virt.req_init_data_ver > 0)
		DRM_INFO("host supports REQ_INIT_DATA handshake\n");
	else
		DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
}
/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev: amdgpu device.
 * Wait for the GPU reset to complete.
 * Return: Zero on success, or a negative error code otherwise.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}
/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev: amdgpu device.
 * The MM table is used by UVD and VCE during initialization.
 * Return: Zero if allocation succeeds, or a negative error code otherwise.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}
/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev: amdgpu device.
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}
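/* Keyed byte-sum checksum shared by the PF2VF and VF2PF messages: every byte
 * of the message is summed on top of the key, then the bytes of the checksum
 * field itself are subtracted so the field can be verified in place.
 */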
unsigned int amd_sriov_msg_checksum(void *obj,
				    unsigned long obj_size,
				    unsigned int key,
				    unsigned int checksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the checksum itself */
	pos = (char *)&checksum;
	for (i = 0; i < sizeof(checksum); ++i)
		ret -= *(pos + i);
	return ret;
}
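/*
 * SR-IOV RAS bad-page handling: the host exposes a list of retired pages in
 * the reserved PF2VF region, and the helpers below copy that list into
 * virt->virt_eh_data and reserve the matching VRAM pages so the guest never
 * hands them out again.
 */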
static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data;
	/* GPU will be marked bad on host if bp count is more than 10,
	 * so alloc 512 is enough.
	 */
	unsigned int align_space = 512;
	void *bps = NULL;
	struct amdgpu_bo **bps_bo = NULL;

	*data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
	if (!*data)
		goto data_failure;

	bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL);
	if (!bps)
		goto bps_failure;

	bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
	if (!bps_bo)
		goto bps_bo_failure;

	(*data)->bps = bps;
	(*data)->bps_bo = bps_bo;
	(*data)->count = 0;
	(*data)->last_reserved = 0;
	virt->ras_init_done = true;

	return 0;

bps_bo_failure:
	kfree(bps);
bps_failure:
	kfree(*data);
data_failure:
	return -ENOMEM;
}
static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_bo *bo;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved - 1; i >= 0; i--) {
		bo = data->bps_bo[i];
		amdgpu_bo_free_kernel(&bo, NULL, NULL);
		data->bps_bo[i] = bo;
		data->last_reserved = i;
	}
}
void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	virt->ras_init_done = false;

	if (!data)
		return;

	amdgpu_virt_ras_release_bp(adev);

	kfree(data->bps);
	kfree(data->bps_bo);
	kfree(data);
	virt->virt_eh_data = NULL;
}
static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
		struct eeprom_table_record *bps, int pages)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	if (!data)
		return;

	memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
	data->count += pages;
}
static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_bo *bo = NULL;
	uint64_t bp;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved; i < data->count; i++) {
		bp = data->bps[i].retired_page;

		/* There are two cases of reserve error should be ignored:
		 * 1) a ras bad page has been allocated (used by someone);
		 * 2) a ras bad page has been reserved (duplicate error injection
		 *    for one page);
		 */
		if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
					       AMDGPU_GPU_PAGE_SIZE,
					       &bo, NULL))
			DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);

		data->bps_bo[i] = bo;
		data->last_reserved = i + 1;
		bo = NULL;
	}
}
static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
		uint64_t retired_page)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	int i;

	if (!data)
		return true;

	for (i = 0; i < data->count; i++)
		if (retired_page == data->bps[i].retired_page)
			return true;

	return false;
}
static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
		uint64_t bp_block_offset, uint32_t bp_block_size)
{
	struct eeprom_table_record bp;
	uint64_t retired_page;
	uint32_t bp_idx, bp_cnt;
	void *vram_usage_va = NULL;

	if (adev->mman.fw_vram_usage_va)
		vram_usage_va = adev->mman.fw_vram_usage_va;
	else
		vram_usage_va = adev->mman.drv_vram_usage_va;

	if (bp_block_size) {
		bp_cnt = bp_block_size / sizeof(uint64_t);
		for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
			retired_page = *(uint64_t *)(vram_usage_va +
					bp_block_offset + bp_idx * sizeof(uint64_t));
			bp.retired_page = retired_page;

			if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
				continue;

			amdgpu_virt_ras_add_bps(adev, &bp, 1);

			amdgpu_virt_ras_reserve_bps(adev);
		}
	}
}
static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
	uint32_t checksum;
	uint32_t checkval;

	uint32_t i;
	uint32_t tmp;

	if (adev->virt.fw_reserve.p_pf2vf == NULL)
		return -EINVAL;

	if (pf2vf_info->size > 1024) {
		DRM_ERROR("invalid pf2vf message size\n");
		return -EINVAL;
	}

	switch (pf2vf_info->version) {
	case 1:
		checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			adev->virt.fw_reserve.checksum_key, checksum);
		if (checksum != checkval) {
			DRM_ERROR("invalid pf2vf message\n");
			return -EINVAL;
		}

		adev->virt.gim_feature =
			((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
		break;
	case 2:
		/* TODO: missing key, need to add it later */
		checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			0, checksum);
		if (checksum != checkval) {
			DRM_ERROR("invalid pf2vf message\n");
			return -EINVAL;
		}

		adev->virt.vf2pf_update_interval_ms =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
		adev->virt.gim_feature =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
		adev->virt.reg_access =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;

		adev->virt.decode_max_dimension_pixels = 0;
		adev->virt.decode_max_frame_pixels = 0;
		adev->virt.encode_max_dimension_pixels = 0;
		adev->virt.encode_max_frame_pixels = 0;
		adev->virt.is_mm_bw_enabled = false;
		for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) {
			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels;
			adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels;
			adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels;
			adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
			adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
		}
		if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
			adev->virt.is_mm_bw_enabled = true;

		adev->unique_id =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
		break;
	default:
		DRM_ERROR("invalid pf2vf version\n");
		return -EINVAL;
	}

	/* correct too large or too small interval value */
	if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
		adev->virt.vf2pf_update_interval_ms = 2000;

	return 0;
}
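/* Fill the ucode_info[] table of the vf2pf message with the firmware
 * versions the guest driver has loaded, one entry per POPULATE_UCODE_INFO
 * call below.
 */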
static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info;

	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return;

	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE, adev->vce.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD, adev->uvd.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC, adev->gmc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME, adev->gfx.me_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP, adev->gfx.pfp_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE, adev->gfx.ce_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC, adev->gfx.rlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
			    adev->psp.asd_context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
			    adev->psp.ras_context.context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
			    adev->psp.xgmi_context.context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN, adev->vcn.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU, adev->dm.dmcu_fw_version);
}
static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info;

	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return -EINVAL;

	memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));

	vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
	vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;

#ifdef MODULE
	if (THIS_MODULE->version != NULL)
		strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
	else
#endif
		strcpy(vf2pf_info->driver_version, "N/A");

	vf2pf_info->pf2vf_version_required = 0; // no requirement, guest understands all
	vf2pf_info->driver_cert = 0;
	vf2pf_info->os_info.all = 0;

	vf2pf_info->fb_usage =
		ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
	vf2pf_info->fb_vis_usage =
		amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
	vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;

	amdgpu_virt_populate_vf2pf_ucode_info(adev);

	/* TODO: read dynamic info */
	vf2pf_info->gfx_usage = 0;
	vf2pf_info->compute_usage = 0;
	vf2pf_info->encode_usage = 0;
	vf2pf_info->decode_usage = 0;

	vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
	vf2pf_info->checksum =
		amd_sriov_msg_checksum(
		vf2pf_info, vf2pf_info->header.size, 0, 0);

	return 0;
}
static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
	int ret;

	ret = amdgpu_virt_read_pf2vf_data(adev);
	if (ret)
		goto out;
	amdgpu_virt_write_vf2pf_data(adev);

out:
	schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
}
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
	if (adev->virt.vf2pf_update_interval_ms != 0) {
		DRM_INFO("clean up the vf2pf work item\n");
		cancel_delayed_work_sync(&adev->virt.vf2pf_work);
		adev->virt.vf2pf_update_interval_ms = 0;
	}
}
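/* Set up the PF2VF/VF2PF exchange: when the reserved VRAM region is mapped,
 * read the host message, publish the guest message and start the periodic
 * vf2pf update work; during early init, fall back to parsing the copy that
 * the host places in the VBIOS image.
 */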
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;
	adev->virt.vf2pf_update_interval_ms = 0;

	if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
		DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!");
	} else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
		/* go through this logic in ip_init and reset to init workqueue */
		amdgpu_virt_exchange_data(adev);

		INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
		schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
	} else if (adev->bios != NULL) {
		/* go through this logic in early init stage to get necessary flags, e.g. rlcg_acc related */
		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)
			(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));

		amdgpu_virt_read_pf2vf_data(adev);
	}
}
void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
{
	uint64_t bp_block_offset = 0;
	uint32_t bp_block_size = 0;
	struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;

	if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
		if (adev->mman.fw_vram_usage_va) {
			adev->virt.fw_reserve.p_pf2vf =
				(struct amd_sriov_msg_pf2vf_info_header *)
				(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
			adev->virt.fw_reserve.p_vf2pf =
				(struct amd_sriov_msg_vf2pf_info_header *)
				(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
		} else if (adev->mman.drv_vram_usage_va) {
			adev->virt.fw_reserve.p_pf2vf =
				(struct amd_sriov_msg_pf2vf_info_header *)
				(adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
			adev->virt.fw_reserve.p_vf2pf =
				(struct amd_sriov_msg_vf2pf_info_header *)
				(adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
		}

		amdgpu_virt_read_pf2vf_data(adev);
		amdgpu_virt_write_vf2pf_data(adev);

		/* bad page handling for version 2 */
		if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
			pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;

			bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
				((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
			bp_block_size = pf2vf_v2->bp_block_size;

			if (bp_block_size && !adev->virt.ras_init_done)
				amdgpu_virt_init_ras_err_handler_data(adev);

			if (adev->virt.ras_init_done)
				amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
		}
	}
}
void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg;

	switch (adev->asic_type) {
	case CHIP_TONGA:
	case CHIP_FIJI:
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
	case CHIP_IP_DISCOVERY:
		reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
		break;
	default: /* other chips don't support SRIOV */
		reg = 0;
		break;
	}

	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (!reg) {
		/* passthrough mode excludes sriov mode */
		if (is_virtual_machine() && !xen_initial_domain())
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}

	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
		/* VF MMIO access (except mailbox range) from CPU
		 * will be blocked during sriov runtime
		 */
		adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;

	/* we have the ability to check now */
	if (amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_TONGA:
		case CHIP_FIJI:
			vi_set_virt_ops(adev);
			break;
		case CHIP_VEGA10:
			soc15_set_virt_ops(adev);
#ifdef CONFIG_X86
			/* not send GPU_INIT_DATA with MS_HYPERV */
			if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
#endif
				/* send a dummy GPU_INIT_DATA request to host on vega10 */
				amdgpu_virt_request_init_data(adev);
			break;
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		case CHIP_ALDEBARAN:
			soc15_set_virt_ops(adev);
			break;
		case CHIP_NAVI10:
		case CHIP_NAVI12:
		case CHIP_SIENNA_CICHLID:
		case CHIP_IP_DISCOVERY:
			nv_set_virt_ops(adev);
			/* try send GPU_INIT_DATA request to host */
			amdgpu_virt_request_init_data(adev);
			break;
		default: /* other chips don't support SRIOV */
			DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
			break;
		}
	}
}
static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_debug(adev) ? true : false;
}

static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_normal(adev) ? true : false;
}

int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) ||
	    amdgpu_virt_access_debugfs_is_kiq(adev))
		return 0;

	if (amdgpu_virt_access_debugfs_is_mmio(adev))
		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	else
		return -EPERM;

	return 0;
}

void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
}
enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
{
	enum amdgpu_sriov_vf_mode mode;

	if (amdgpu_sriov_vf(adev)) {
		if (amdgpu_sriov_is_pp_one_vf(adev))
			mode = SRIOV_VF_MODE_ONE_VF;
		else
			mode = SRIOV_VF_MODE_MULTI_VF;
	} else {
		mode = SRIOV_VF_MODE_BARE_METAL;
	}

	return mode;
}
bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
{
	switch (adev->ip_versions[MP0_HWIP][0]) {
	case IP_VERSION(13, 0, 0):
		/* no vf autoload, white list */
		if (ucode_id == AMDGPU_UCODE_ID_VCN1 ||
		    ucode_id == AMDGPU_UCODE_ID_VCN)
			return false;
		else
			return true;
	case IP_VERSION(13, 0, 10):
		/* white list */
		if (ucode_id == AMDGPU_UCODE_ID_CAP
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_MES
		|| ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA
		|| ucode_id == AMDGPU_UCODE_ID_CP_MES1
		|| ucode_id == AMDGPU_UCODE_ID_CP_MES1_DATA
		|| ucode_id == AMDGPU_UCODE_ID_VCN1
		|| ucode_id == AMDGPU_UCODE_ID_VCN)
			return false;
		else
			return true;
	default:
		/* legacy black list */
		if (ucode_id == AMDGPU_UCODE_ID_SDMA0
		|| ucode_id == AMDGPU_UCODE_ID_SDMA1
		|| ucode_id == AMDGPU_UCODE_ID_SDMA2
		|| ucode_id == AMDGPU_UCODE_ID_SDMA3
		|| ucode_id == AMDGPU_UCODE_ID_SDMA4
		|| ucode_id == AMDGPU_UCODE_ID_SDMA5
		|| ucode_id == AMDGPU_UCODE_ID_SDMA6
		|| ucode_id == AMDGPU_UCODE_ID_SDMA7
		|| ucode_id == AMDGPU_UCODE_ID_RLC_G
		|| ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
		|| ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
		|| ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
		|| ucode_id == AMDGPU_UCODE_ID_SMC)
			return true;
		else
			return false;
	}
}
void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
			struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
			struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
{
	uint32_t i;

	if (!adev->virt.is_mm_bw_enabled)
		return;

	if (encode) {
		for (i = 0; i < encode_array_size; i++) {
			encode[i].max_width = adev->virt.encode_max_dimension_pixels;
			encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
			if (encode[i].max_width > 0)
				encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width;
			else
				encode[i].max_height = 0;
		}
	}

	if (decode) {
		for (i = 0; i < decode_array_size; i++) {
			decode[i].max_width = adev->virt.decode_max_dimension_pixels;
			decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
			if (decode[i].max_width > 0)
				decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width;
			else
				decode[i].max_height = 0;
		}
	}
}
static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
						 u32 acc_flags, u32 hwip,
						 bool write, u32 *rlcg_flag)
{
	bool ret = false;

	switch (hwip) {
	case GC_HWIP:
		if (amdgpu_sriov_reg_indirect_gc(adev)) {
			*rlcg_flag =
				write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
			ret = true;
		/* only in new version, AMDGPU_REGS_NO_KIQ and
		 * AMDGPU_REGS_RLC are enabled simultaneously */
		} else if ((acc_flags & AMDGPU_REGS_RLC) &&
			   !(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
			*rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
			ret = true;
		}
		break;
	case MMHUB_HWIP:
		if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
		    (acc_flags & AMDGPU_REGS_RLC) && write) {
			*rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
			ret = true;
		}
		break;
	default:
		break;
	}

	return ret;
}
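/*
 * Perform an indirect register access on behalf of the VF by handing the
 * request to the RLC through the scratch register interface described by
 * adev->gfx.rlc.reg_access_ctrl, then poll scratch_reg1 until the RLC
 * acknowledges the request (or report the error flags it returned).
 */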
static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
	uint32_t timeout = 50000;
	uint32_t i, tmp;
	uint32_t ret = 0;
	void *scratch_reg0;
	void *scratch_reg1;
	void *scratch_reg2;
	void *scratch_reg3;
	void *spare_int;

	if (!adev->gfx.rlc.rlcg_reg_access_supported) {
		dev_err(adev->dev,
			"indirect registers access through rlcg is not available\n");
		return 0;
	}

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
	scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
	scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
	scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
	scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
	if (reg_access_ctrl->spare_int)
		spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;

	if (offset == reg_access_ctrl->grbm_cntl) {
		/* if the target reg offset is grbm_cntl, write to scratch_reg2 */
		writel(v, scratch_reg2);
		if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
			writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else if (offset == reg_access_ctrl->grbm_idx) {
		/* if the target reg offset is grbm_idx, write to scratch_reg3 */
		writel(v, scratch_reg3);
		if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
			writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else {
		/*
		 * SCRATCH_REG0		= read/write value
		 * SCRATCH_REG1[30:28]	= command
		 * SCRATCH_REG1[19:0]	= address in dword
		 * SCRATCH_REG1[26:24]	= Error reporting
		 */
		writel(v, scratch_reg0);
		writel((offset | flag), scratch_reg1);
		if (reg_access_ctrl->spare_int)
			writel(1, spare_int);

		for (i = 0; i < timeout; i++) {
			tmp = readl(scratch_reg1);
			if (!(tmp & AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK))
				break;
			udelay(10);
		}

		if (i >= timeout) {
			if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
				if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
					dev_err(adev->dev,
						"vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
				} else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
					dev_err(adev->dev,
						"wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
				} else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
					dev_err(adev->dev,
						"register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
				} else {
					dev_err(adev->dev,
						"unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
				}
			} else {
				dev_err(adev->dev,
					"timeout: rlcg failed to program reg: 0x%05x\n", offset);
			}
		}
	}

	ret = readl(scratch_reg0);

	return ret;
}
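/* SR-IOV aware register accessors: outside of SR-IOV runtime, registers that
 * need RLC assistance are routed through amdgpu_virt_rlcg_reg_rw(); all
 * other accesses fall back to the regular MMIO helpers.
 */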
void amdgpu_sriov_wreg(struct amdgpu_device *adev,
		       u32 offset, u32 value,
		       u32 acc_flags, u32 hwip)
{
	u32 rlcg_flag;

	if (!amdgpu_sriov_runtime(adev) &&
	    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
		amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag);
		return;
	}

	if (acc_flags & AMDGPU_REGS_NO_KIQ)
		WREG32_NO_KIQ(offset, value);
	else
		WREG32(offset, value);
}
u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
		      u32 offset, u32 acc_flags, u32 hwip)
{
	u32 rlcg_flag;

	if (!amdgpu_sriov_runtime(adev) &&
	    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
		return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag);

	if (acc_flags & AMDGPU_REGS_NO_KIQ)
		return RREG32_NO_KIQ(offset);
	else
		return RREG32(offset);
}