/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/module.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"

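/**
 * amdgpu_virt_mmio_blocked() - check whether the hypervisor blocks MMIO
 * @adev:       amdgpu device.
 * Probe SCRATCH_REG0 with a direct (non-KIQ) read; a value of all 1s
 * means the access did not reach the register.
 * Return: True if MMIO accesses are blocked by the hypervisor.
 */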
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
        /*
         * At this point all MMIO pages except the mailbox are blocked
         * if blocking is enabled in the hypervisor, so probe
         * SCRATCH_REG0 to test.
         */
        return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

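/**
 * amdgpu_virt_init_setting() - adjust device settings for a VF
 * @adev:       amdgpu device.
 * Enable virtual display with at least one CRTC (a VF has no physical
 * display path), drop DRIVER_ATOMIC and clear the clock- and
 * powergating flags.
 */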
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
        /* enable virtual display */
        if (adev->mode_info.num_crtc == 0)
                adev->mode_info.num_crtc = 1;
        adev->enable_virtual_display = true;
        adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
        adev->cg_flags = 0;
        adev->pg_flags = 0;
}

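/**
 * amdgpu_virt_kiq_reg_write_reg_wait() - write one register, poll another
 * @adev:       amdgpu device.
 * @reg0:       register to write.
 * @reg1:       register to poll.
 * @ref:        reference value for the poll.
 * @mask:       bits of @reg1 compared against @ref.
 * Emit a reg-write-reg-wait packet on the KIQ ring and poll its fence.
 * On timeout, retry up to MAX_KIQ_REG_TRY times unless running in
 * interrupt context, where sleeping between retries is not allowed.
 */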
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
                                        uint32_t reg0, uint32_t reg1,
                                        uint32_t ref, uint32_t mask)
{
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq;

        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
                                            ref, mask);
        r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
        if (r)
                goto failed_undo;

        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

        /* don't wait anymore for IRQ context */
        if (r < 1 && in_interrupt())
                goto failed_kiq;

        might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
                r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        }

        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq;

        return;

failed_undo:
        amdgpu_ring_undo(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq:
        pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
}

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:       amdgpu device.
 * @init:       is driver init time.
 * When starting to init/fini the driver, full gpu access must be
 * requested first.
 * Return: Zero if request success, otherwise will return error.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->req_full_gpu) {
                r = virt->ops->req_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:       amdgpu device.
 * @init:       is driver init time.
 * When finishing driver init/fini, full gpu access must be released.
 * Return: Zero if release success, otherwise will return error.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->rel_full_gpu) {
                r = virt->ops->rel_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
        }
        return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:       amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the
 * VM is using.
 * Return: Zero if reset success, otherwise will return error.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->reset_gpu) {
                r = virt->ops->reset_gpu(adev);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}

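/**
 * amdgpu_virt_request_init_data() - request init data from the host
 * @adev:       amdgpu device.
 * Ask the GPU hypervisor for its init data and log whether the host
 * supports the REQ_INIT_DATA handshake.
 */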
void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;

        if (virt->ops && virt->ops->req_init_data)
                virt->ops->req_init_data(adev);

        if (adev->virt.req_init_data_ver > 0)
                DRM_INFO("host supports REQ_INIT_DATA handshake\n");
        else
                DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev:       amdgpu device.
 * Wait for the GPU reset to complete.
 * Return: Zero if reset success, otherwise will return error.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;

        if (!virt->ops || !virt->ops->wait_reset)
                return -EINVAL;

        return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev:       amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero if allocation succeeds.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
        int r;

        if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
                return 0;

        r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM,
                                    &adev->virt.mm_table.bo,
                                    &adev->virt.mm_table.gpu_addr,
                                    (void *)&adev->virt.mm_table.cpu_addr);
        if (r) {
                DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
                return r;
        }

        memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
        DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
                 adev->virt.mm_table.gpu_addr,
                 adev->virt.mm_table.cpu_addr);
        return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:       amdgpu device.
 * Free MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
        if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
                return;

        amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
                              &adev->virt.mm_table.gpu_addr,
                              (void *)&adev->virt.mm_table.cpu_addr);
        adev->virt.mm_table.gpu_addr = 0;
}

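/**
 * amdgpu_virt_fw_reserve_get_checksum() - compute a simple byte checksum
 * @obj:        buffer to checksum.
 * @obj_size:   size of @obj in bytes.
 * @key:        seed for the checksum.
 * @chksum:     value of the checksum field stored in @obj; its bytes
 *              are subtracted so the field does not sum over itself.
 * Return: Byte-wise sum over @obj seeded with @key, minus the bytes of
 * @chksum.
 */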
int amdgpu_virt_fw_reserve_get_checksum(void *obj,
                                        unsigned long obj_size,
                                        unsigned int key,
                                        unsigned int chksum)
{
        unsigned int ret = key;
        unsigned long i = 0;
        unsigned char *pos;

        pos = (unsigned char *)obj;
        /* calculate checksum */
        for (i = 0; i < obj_size; ++i)
                ret += *(pos + i);
        /* minus the chksum itself */
        pos = (unsigned char *)&chksum;
        for (i = 0; i < sizeof(chksum); ++i)
                ret -= *(pos + i);
        return ret;
}

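/**
 * amdgpu_virt_init_data_exchange() - set up the PF<->VF exchange area
 * @adev:       amdgpu device.
 * Locate the pf2vf header in the firmware-reserved VRAM and validate
 * its checksum; on success, publish the vf2pf info (version, size,
 * driver version and our own checksum) directly behind it.
 */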
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
        uint32_t pf2vf_size = 0;
        uint32_t checksum = 0;
        uint32_t checkval;
        char *str;

        adev->virt.fw_reserve.p_pf2vf = NULL;
        adev->virt.fw_reserve.p_vf2pf = NULL;

        if (adev->fw_vram_usage.va != NULL) {
                adev->virt.fw_reserve.p_pf2vf =
                        (struct amd_sriov_msg_pf2vf_info_header *)(
                        adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

                /* pf2vf message must fit within 4K */
                if (pf2vf_size > 0 && pf2vf_size < 4096) {
                        checkval = amdgpu_virt_fw_reserve_get_checksum(
                                adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
                                adev->virt.fw_reserve.checksum_key, checksum);
                        if (checkval == checksum) {
                                adev->virt.fw_reserve.p_vf2pf =
                                        ((void *)adev->virt.fw_reserve.p_pf2vf +
                                        pf2vf_size);
                                memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
                                        sizeof(amdgim_vf2pf_info));
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
                                        AMDGPU_FW_VRAM_VF2PF_VER);
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
                                        sizeof(amdgim_vf2pf_info));
                                AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
                                        &str);
#ifdef MODULE
                                if (THIS_MODULE->version != NULL)
                                        strcpy(str, THIS_MODULE->version);
                                else
#endif
                                        strcpy(str, "N/A");
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
                                        0);
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
                                        amdgpu_virt_fw_reserve_get_checksum(
                                        adev->virt.fw_reserve.p_vf2pf,
                                        pf2vf_size,
                                        adev->virt.fw_reserve.checksum_key, 0));
                        }
                }
        }
}

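/**
 * amdgpu_detect_virtualization() - detect SR-IOV and passthrough mode
 * @adev:       amdgpu device.
 * Read the ASIC's IOV function-identifier register and translate it
 * into AMDGPU_SRIOV_CAPS_* flags; a zero value inside a virtual
 * machine means passthrough mode instead.
 */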
void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
        uint32_t reg;

        switch (adev->asic_type) {
        case CHIP_TONGA:
        case CHIP_FIJI:
                reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
                break;
        case CHIP_VEGA10:
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI12:
        case CHIP_ARCTURUS:
                reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
                break;
        default: /* other chips don't support SRIOV */
                reg = 0;
                break;
        }

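        /* bit 0: this function is a VF; bit 31: the host has IOV enabled */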
        if (reg & 1)
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

        if (reg & 0x80000000)
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

        if (!reg) {
                if (is_virtual_machine())       /* passthrough mode excludes sriov mode */
                        adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
        }
}

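/*
 * Debugfs register access under SR-IOV: in "debug" mode (presumably
 * while the VF holds full gpu access) registers are reachable over
 * plain MMIO, while in normal runtime mode access must go through the
 * KIQ.
 */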
bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
{
        return amdgpu_sriov_is_debug(adev);
}

bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
{
        return amdgpu_sriov_is_normal(adev);
}

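/**
 * amdgpu_virt_enable_access_debugfs() - allow direct register access
 * @adev:       amdgpu device.
 * Clear the RUNTIME cap so that debugfs accesses bypass the KIQ; the
 * caller is expected to restore it with
 * amdgpu_virt_disable_access_debugfs().
 * Return: Zero on success, -EPERM when direct access is not allowed.
 */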
int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
{
        if (!amdgpu_sriov_vf(adev) ||
            amdgpu_virt_access_debugfs_is_kiq(adev))
                return 0;

        if (amdgpu_virt_access_debugfs_is_mmio(adev))
                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        else
                return -EPERM;

        return 0;
}

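/**
 * amdgpu_virt_disable_access_debugfs() - revert to KIQ register access
 * @adev:       amdgpu device.
 * Counterpart of amdgpu_virt_enable_access_debugfs(); set the RUNTIME
 * cap again on a VF.
 */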
void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
{
        if (amdgpu_sriov_vf(adev))
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
}

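/**
 * amdgpu_virt_get_sriov_vf_mode() - classify the SR-IOV VF mode
 * @adev:       amdgpu device.
 * Return: SRIOV_VF_MODE_BARE_METAL when not running as a VF, otherwise
 * SRIOV_VF_MODE_ONE_VF or SRIOV_VF_MODE_MULTI_VF depending on whether
 * this VF has powerplay control to itself.
 */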
enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
{
        enum amdgpu_sriov_vf_mode mode;

        if (amdgpu_sriov_vf(adev)) {
                if (amdgpu_sriov_is_pp_one_vf(adev))
                        mode = SRIOV_VF_MODE_ONE_VF;
                else
                        mode = SRIOV_VF_MODE_MULTI_VF;
        } else {
                mode = SRIOV_VF_MODE_BARE_METAL;
        }

        return mode;
}