/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/module.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
        /*
         * By now all MMIO pages except the mailbox are blocked if blocking
         * is enabled in the hypervisor.  Choose SCRATCH_REG0 to test: an
         * all-ones read means the access was dropped.
         */
        return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
        /* enable virtual display */
        if (adev->mode_info.num_crtc == 0)
                adev->mode_info.num_crtc = 1;
        adev->enable_virtual_display = true;
        adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
}
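
/*
 * Under SR-IOV the VF can lose direct MMIO access at runtime, so
 * write-and-wait register operations are routed through the KIQ ring;
 * completion is detected by polling the emitted fence, with a bounded
 * msleep/retry loop in case the world switch delays execution.
 */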
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
                                        uint32_t reg0, uint32_t reg1,
                                        uint32_t ref, uint32_t mask)
{
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq;

        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
                                            ref, mask);
        r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
        if (r)
                goto failed_undo;

        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

        /* don't wait anymore for IRQ context */
        if (r < 1 && in_interrupt())
                goto failed_kiq;

        might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
                r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        }

        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq;

        return;

failed_undo:
        amdgpu_ring_undo(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq:
        pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
}
/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When starting driver init/fini, full GPU access must be requested first.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->req_full_gpu) {
                r = virt->ops->req_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}
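
/*
 * Typical pairing (a sketch; the device init/fini paths are the usual
 * callers): request exclusive access, do the work, then release it so the
 * hypervisor can schedule other VFs again.
 *
 *	r = amdgpu_virt_request_full_gpu(adev, true);
 *	if (r)
 *		return r;
 *	...init/fini work with exclusive access...
 *	amdgpu_virt_release_full_gpu(adev, true);
 */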
/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When driver init/fini finishes, full GPU access must be released.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->rel_full_gpu) {
                r = virt->ops->rel_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}
/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->reset_gpu) {
                r = virt->ops->reset_gpu(adev);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}
void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;

        if (virt->ops && virt->ops->req_init_data)
                virt->ops->req_init_data(adev);

        if (adev->virt.req_init_data_ver > 0)
                DRM_INFO("host supports REQ_INIT_DATA handshake\n");
        else
                DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
}
/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev:	amdgpu device.
 * Wait for the GPU reset to complete.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;

        if (!virt->ops || !virt->ops->wait_reset)
                return -EINVAL;

        return virt->ops->wait_reset(adev);
}
/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev:	amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on allocation success, error code otherwise.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
        int r;

        if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
                return 0;

        r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM,
                                    &adev->virt.mm_table.bo,
                                    &adev->virt.mm_table.gpu_addr,
                                    (void *)&adev->virt.mm_table.cpu_addr);
        if (r) {
                DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
                return r;
        }

        memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
        DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
                 adev->virt.mm_table.gpu_addr,
                 adev->virt.mm_table.cpu_addr);
        return 0;
}
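
/*
 * The table is a single zeroed VRAM page; mm_table.gpu_addr is what the
 * SR-IOV multimedia init paths hand to the hardware, which is why the
 * allocation is skipped on bare metal and when the table already exists.
 */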
/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:	amdgpu device.
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
        if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
                return;

        amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
                              &adev->virt.mm_table.gpu_addr,
                              (void *)&adev->virt.mm_table.cpu_addr);
        adev->virt.mm_table.gpu_addr = 0;
}
unsigned int amdgpu_virt_fw_reserve_get_checksum(void *obj,
                                                 unsigned long obj_size,
                                                 unsigned int key,
                                                 unsigned int chksum)
{
        unsigned int ret = key;
        unsigned char *pos = (unsigned char *)obj;
        unsigned long i;

        /* calculate checksum */
        for (i = 0; i < obj_size; ++i)
                ret += *(pos + i);
        /* minus the chksum itself */
        pos = (unsigned char *)&chksum;
        for (i = 0; i < sizeof(chksum); ++i)
                ret -= *(pos + i);
        return ret;
}
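
/*
 * Both ends compute the sum above seeded with a shared key
 * (fw_reserve.checksum_key).  Passing the received checksum as @chksum
 * subtracts its own bytes back out of the sum, so the receiver can
 * recompute over the message as-is and compare, as
 * amdgpu_virt_init_data_exchange() does below.
 */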
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
        uint32_t pf2vf_size = 0;
        uint32_t checksum = 0;
        uint32_t checkval;
        char *str;

        adev->virt.fw_reserve.p_pf2vf = NULL;
        adev->virt.fw_reserve.p_vf2pf = NULL;

        if (adev->fw_vram_usage.va != NULL) {
                adev->virt.fw_reserve.p_pf2vf =
                        (struct amd_sriov_msg_pf2vf_info_header *)(
                        adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

                /* the pf2vf message must fit within 4K */
                if (pf2vf_size > 0 && pf2vf_size < 4096) {
                        checkval = amdgpu_virt_fw_reserve_get_checksum(
                                adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
                                adev->virt.fw_reserve.checksum_key, checksum);
                        if (checkval == checksum) {
                                adev->virt.fw_reserve.p_vf2pf =
                                        ((void *)adev->virt.fw_reserve.p_pf2vf +
                                        pf2vf_size);
                                memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
                                       sizeof(amdgim_vf2pf_info));
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
                                        AMDGPU_FW_VRAM_VF2PF_VER);
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
                                        sizeof(amdgim_vf2pf_info));
                                AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
                                        &str);
#ifdef MODULE
                                if (THIS_MODULE->version != NULL)
                                        strcpy(str, THIS_MODULE->version);
                                else
#endif
                                        strcpy(str, "N/A");
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
                                        0);
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
                                        amdgpu_virt_fw_reserve_get_checksum(
                                        adev->virt.fw_reserve.p_vf2pf,
                                        pf2vf_size,
                                        adev->virt.fw_reserve.checksum_key, 0));
                        }
                }
        }
}
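
/*
 * Resulting layout of the reserved VRAM window used above: the pf2vf
 * message written by the host starts at AMDGIM_DATAEXCHANGE_OFFSET, and
 * the guest's vf2pf message is placed immediately after it, at an offset
 * of pf2vf_size bytes.
 */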
void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
        uint32_t reg;

        switch (adev->asic_type) {
        case CHIP_TONGA: case CHIP_FIJI:
                reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
                break;
        case CHIP_VEGA10: case CHIP_VEGA20: case CHIP_NAVI10:
        case CHIP_NAVI12: case CHIP_ARCTURUS:
                reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
                break;
        default: /* other chips don't support SRIOV */
                reg = 0;
                break;
        }

        if (reg & 1)
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
        if (reg & 0x80000000)
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
        if (reg == 0 && is_virtual_machine()) /* passthrough mode excludes SR-IOV */
                adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
{
        return amdgpu_sriov_is_debug(adev);
}

bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
{
        return amdgpu_sriov_is_normal(adev);
}

int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
{
        if (!amdgpu_sriov_vf(adev) ||
            amdgpu_virt_access_debugfs_is_kiq(adev))
                return 0;

        if (amdgpu_virt_access_debugfs_is_mmio(adev))
                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        else
                return -EPERM;

        return 0;
}

void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
{
        if (amdgpu_sriov_vf(adev))
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
}
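
/*
 * The two helpers above are meant to bracket debugfs register access on a
 * VF: enable clears AMDGPU_SRIOV_CAPS_RUNTIME when full-access (debug)
 * mode permits direct MMIO, returns -EPERM when neither direct MMIO nor
 * KIQ access is currently safe, and disable restores the runtime flag.
 */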
enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
{
        enum amdgpu_sriov_vf_mode mode;

        if (amdgpu_sriov_vf(adev)) {
                if (amdgpu_sriov_is_pp_one_vf(adev))
                        mode = SRIOV_VF_MODE_ONE_VF;
                else
                        mode = SRIOV_VF_MODE_MULTI_VF;
        } else {
                mode = SRIOV_VF_MODE_BARE_METAL;
        }

        return mode;
}