drm/amdgpu: set default num_kcq to 2 under sriov
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/module.h>

#ifdef CONFIG_X86
#include <asm/hypervisor.h>
#endif

#include <drm/drm_drv.h>
#include <xen/xen.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "vi.h"
#include "soc15.h"
#include "nv.h"

#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
        do { \
                vf2pf_info->ucode_info[ucode].id = ucode; \
                vf2pf_info->ucode_info[ucode].version = ver; \
        } while (0)

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
        /*
         * By now all MMIO pages except the mailbox are blocked if
         * blocking is enabled in the hypervisor. Choose SCRATCH_REG0
         * to test.
         */
        return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
        struct drm_device *ddev = adev_to_drm(adev);

        /* enable virtual display */
        if (adev->asic_type != CHIP_ALDEBARAN &&
            adev->asic_type != CHIP_ARCTURUS) {
                if (adev->mode_info.num_crtc == 0)
                        adev->mode_info.num_crtc = 1;
                adev->enable_virtual_display = true;
        }
        ddev->driver_features &= ~DRIVER_ATOMIC;
        adev->cg_flags = 0;
        adev->pg_flags = 0;

        /* enable mcbp for sriov */
        amdgpu_mcbp = 1;

        /* reduce the kcq count to 2 under sriov to cut latency */
        if (amdgpu_num_kcq == -1)
                amdgpu_num_kcq = 2;
}
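
/*
 * Illustrative note (not driver code): amdgpu_num_kcq is the module
 * parameter and stays at -1 unless the admin sets it explicitly, e.g.:
 *
 *        modprobe amdgpu num_kcq=4
 *
 * so the assignment above only installs the SR-IOV default of 2 when no
 * override was given; an explicit num_kcq= value always wins.
 */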

void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
                                        uint32_t reg0, uint32_t reg1,
                                        uint32_t ref, uint32_t mask)
{
        struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
        struct amdgpu_ring *ring = &kiq->ring;
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq;

        if (adev->mes.ring.sched.ready) {
                amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
                                              ref, mask);
                return;
        }

        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
                                            ref, mask);
        r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
        if (r)
                goto failed_undo;

        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

        /* don't wait anymore for IRQ context */
        if (r < 1 && in_interrupt())
                goto failed_kiq;

        might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
                r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        }

        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq;

        return;

failed_undo:
        amdgpu_ring_undo(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq:
        dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
}

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:       amdgpu device.
 * @init:       is driver init time.
 * When starting to init/fini the driver, first request full gpu access.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->req_full_gpu) {
                r = virt->ops->req_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}
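
/*
 * Illustrative sketch (assumed call pattern, not driver code): init and
 * fini paths bracket exclusive access with this helper and its partner
 * amdgpu_virt_release_full_gpu() below:
 *
 *        r = amdgpu_virt_request_full_gpu(adev, true);
 *        if (r)
 *                return r;
 *        // ... program hardware while the host fences off other VFs ...
 *        amdgpu_virt_release_full_gpu(adev, true);
 *
 * While full access is held, AMDGPU_SRIOV_CAPS_RUNTIME stays cleared, so
 * register accesses go straight to MMIO instead of through KIQ.
 */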

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:       amdgpu device.
 * @init:       is driver init time.
 * When finishing driver init/fini, release full gpu access.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->rel_full_gpu) {
                r = virt->ops->rel_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
        }
        return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:       amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->reset_gpu) {
                r = virt->ops->reset_gpu(adev);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}

void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;

        if (virt->ops && virt->ops->req_init_data)
                virt->ops->req_init_data(adev);

        if (adev->virt.req_init_data_ver > 0)
                DRM_INFO("host supports REQ_INIT_DATA handshake\n");
        else
                DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev:       amdgpu device.
 * Wait for the GPU reset to complete.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;

        if (!virt->ops || !virt->ops->wait_reset)
                return -EINVAL;

        return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev:       amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
        int r;

        if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
                return 0;

        r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM |
                                    AMDGPU_GEM_DOMAIN_GTT,
                                    &adev->virt.mm_table.bo,
                                    &adev->virt.mm_table.gpu_addr,
                                    (void *)&adev->virt.mm_table.cpu_addr);
        if (r) {
                DRM_ERROR("failed to alloc mm table, error = %d.\n", r);
                return r;
        }

        memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
        DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
                 adev->virt.mm_table.gpu_addr,
                 adev->virt.mm_table.cpu_addr);
        return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:       amdgpu device.
 * Free MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
        if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
                return;

        amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
                              &adev->virt.mm_table.gpu_addr,
                              (void *)&adev->virt.mm_table.cpu_addr);
        adev->virt.mm_table.gpu_addr = 0;
}

unsigned int amd_sriov_msg_checksum(void *obj,
                                    unsigned long obj_size,
                                    unsigned int key,
                                    unsigned int checksum)
{
        unsigned int ret = key;
        unsigned long i = 0;
        unsigned char *pos;

        pos = (char *)obj;
        /* calculate checksum */
        for (i = 0; i < obj_size; ++i)
                ret += *(pos + i);
        /* minus the checksum itself */
        pos = (char *)&checksum;
        for (i = 0; i < sizeof(checksum); ++i)
                ret -= *(pos + i);
        return ret;
}
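
/*
 * Illustrative sketch (pf2vf and key stand for the caller's buffer and
 * checksum key; not driver code): verifying a message with the helper
 * above. The helper subtracts the stored checksum bytes back out, so the
 * caller passes the whole buffer plus the checksum it read:
 *
 *        uint32_t want = pf2vf->checksum;
 *        uint32_t got  = amd_sriov_msg_checksum(pf2vf, pf2vf->size, key, want);
 *
 *        if (got != want)
 *                return -EINVAL;        // corrupted or partially written
 *
 * amdgpu_virt_read_pf2vf_data() below follows exactly this pattern.
 */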

static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data;
        /* GPU will be marked bad on the host if the bp count is more than 10,
         * so allocating 512 is enough.
         */
        unsigned int align_space = 512;
        void *bps = NULL;
        struct amdgpu_bo **bps_bo = NULL;

        *data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
        if (!*data)
                goto data_failure;

        bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL);
        if (!bps)
                goto bps_failure;

        bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
        if (!bps_bo)
                goto bps_bo_failure;

        (*data)->bps = bps;
        (*data)->bps_bo = bps_bo;
        (*data)->count = 0;
        (*data)->last_reserved = 0;

        virt->ras_init_done = true;

        return 0;

bps_bo_failure:
        kfree(bps);
bps_failure:
        kfree(*data);
data_failure:
        return -ENOMEM;
}

static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
        struct amdgpu_bo *bo;
        int i;

        if (!data)
                return;

        for (i = data->last_reserved - 1; i >= 0; i--) {
                bo = data->bps_bo[i];
                amdgpu_bo_free_kernel(&bo, NULL, NULL);
                data->bps_bo[i] = bo;
                data->last_reserved = i;
        }
}

void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

        virt->ras_init_done = false;

        if (!data)
                return;

        amdgpu_virt_ras_release_bp(adev);

        kfree(data->bps);
        kfree(data->bps_bo);
        kfree(data);
        virt->virt_eh_data = NULL;
}

static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
                struct eeprom_table_record *bps, int pages)
{
        struct amdgpu_virt *virt = &adev->virt;
        struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

        if (!data)
                return;

        memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
        data->count += pages;
}

static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
        struct amdgpu_bo *bo = NULL;
        uint64_t bp;
        int i;

        if (!data)
                return;

        for (i = data->last_reserved; i < data->count; i++) {
                bp = data->bps[i].retired_page;

                /* There are two cases where a reserve error should be ignored:
                 * 1) a ras bad page has been allocated (used by someone);
                 * 2) a ras bad page has been reserved (duplicate error injection
                 *    for one page);
                 */
                if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
                                               AMDGPU_GPU_PAGE_SIZE,
                                               &bo, NULL))
                        DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);

                data->bps_bo[i] = bo;
                data->last_reserved = i + 1;
                bo = NULL;
        }
}

static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
                uint64_t retired_page)
{
        struct amdgpu_virt *virt = &adev->virt;
        struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
        int i;

        if (!data)
                return true;

        for (i = 0; i < data->count; i++)
                if (retired_page == data->bps[i].retired_page)
                        return true;

        return false;
}

static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
                uint64_t bp_block_offset, uint32_t bp_block_size)
{
        struct eeprom_table_record bp;
        uint64_t retired_page;
        uint32_t bp_idx, bp_cnt;
        void *vram_usage_va = NULL;

        if (adev->mman.fw_vram_usage_va)
                vram_usage_va = adev->mman.fw_vram_usage_va;
        else
                vram_usage_va = adev->mman.drv_vram_usage_va;

        if (bp_block_size) {
                bp_cnt = bp_block_size / sizeof(uint64_t);
                for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
                        retired_page = *(uint64_t *)(vram_usage_va +
                                        bp_block_offset + bp_idx * sizeof(uint64_t));
                        bp.retired_page = retired_page;

                        if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
                                continue;

                        amdgpu_virt_ras_add_bps(adev, &bp, 1);

                        amdgpu_virt_ras_reserve_bps(adev);
                }
        }
}

static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
{
        struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
        uint32_t checksum;
        uint32_t checkval;
        uint32_t i;
        uint32_t tmp;

        if (adev->virt.fw_reserve.p_pf2vf == NULL)
                return -EINVAL;

        if (pf2vf_info->size > 1024) {
                DRM_ERROR("invalid pf2vf message size\n");
                return -EINVAL;
        }

        switch (pf2vf_info->version) {
        case 1:
                checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
                checkval = amd_sriov_msg_checksum(
                        adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
                        adev->virt.fw_reserve.checksum_key, checksum);
                if (checksum != checkval) {
                        DRM_ERROR("invalid pf2vf message\n");
                        return -EINVAL;
                }

                adev->virt.gim_feature =
                        ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
                break;
        case 2:
                /* TODO: missing key, need to add it later */
                checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
                checkval = amd_sriov_msg_checksum(
                        adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
                        0, checksum);
                if (checksum != checkval) {
                        DRM_ERROR("invalid pf2vf message\n");
                        return -EINVAL;
                }

                adev->virt.vf2pf_update_interval_ms =
                        ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
                adev->virt.gim_feature =
                        ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
                adev->virt.reg_access =
                        ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;

                adev->virt.decode_max_dimension_pixels = 0;
                adev->virt.decode_max_frame_pixels = 0;
                adev->virt.encode_max_dimension_pixels = 0;
                adev->virt.encode_max_frame_pixels = 0;
                adev->virt.is_mm_bw_enabled = false;
                for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) {
                        tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels;
                        adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels);

                        tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels;
                        adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels);

                        tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels;
                        adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels);

                        tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
                        adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
                }
                if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
                        adev->virt.is_mm_bw_enabled = true;

                adev->unique_id =
                        ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
                break;
        default:
                DRM_ERROR("invalid pf2vf version\n");
                return -EINVAL;
        }

        /* clamp out-of-range update intervals */
        if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
                adev->virt.vf2pf_update_interval_ms = 2000;

        return 0;
}

static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
{
        struct amd_sriov_msg_vf2pf_info *vf2pf_info;

        vf2pf_info = (struct amd_sriov_msg_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf;

        if (adev->virt.fw_reserve.p_vf2pf == NULL)
                return;

        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE,      adev->vce.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD,      adev->uvd.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC,       adev->gmc.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME,       adev->gfx.me_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP,      adev->gfx.pfp_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE,       adev->gfx.ce_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC,      adev->gfx.rlc_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC,      adev->gfx.mec_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2,     adev->gfx.mec2_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_IMU,      adev->gfx.imu_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS,      adev->psp.sos.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
                            adev->psp.asd_context.bin_desc.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
                            adev->psp.ras_context.context.bin_desc.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
                            adev->psp.xgmi_context.context.bin_desc.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC,      adev->pm.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA,     adev->sdma.instance[0].fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2,    adev->sdma.instance[1].fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN,      adev->vcn.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU,     adev->dm.dmcu_fw_version);
}

static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
{
        struct amd_sriov_msg_vf2pf_info *vf2pf_info;

        vf2pf_info = (struct amd_sriov_msg_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf;

        if (adev->virt.fw_reserve.p_vf2pf == NULL)
                return -EINVAL;

        memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));

        vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
        vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;

#ifdef MODULE
        if (THIS_MODULE->version != NULL)
                strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
        else
#endif
                strcpy(vf2pf_info->driver_version, "N/A");

        vf2pf_info->pf2vf_version_required = 0; /* no requirement, guest understands all */
        vf2pf_info->driver_cert = 0;
        vf2pf_info->os_info.all = 0;

        vf2pf_info->fb_usage =
                ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
        vf2pf_info->fb_vis_usage =
                amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
        vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
        vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;

        amdgpu_virt_populate_vf2pf_ucode_info(adev);

        /* TODO: read dynamic info */
        vf2pf_info->gfx_usage = 0;
        vf2pf_info->compute_usage = 0;
        vf2pf_info->encode_usage = 0;
        vf2pf_info->decode_usage = 0;

        vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
        vf2pf_info->checksum =
                amd_sriov_msg_checksum(
                vf2pf_info, vf2pf_info->header.size, 0, 0);

        return 0;
}

static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
{
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
        int ret;

        ret = amdgpu_virt_read_pf2vf_data(adev);
        if (ret)
                goto out;
        amdgpu_virt_write_vf2pf_data(adev);

out:
        /* the interval is in ms; convert to jiffies as in init_data_exchange */
        schedule_delayed_work(&(adev->virt.vf2pf_work),
                              msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
}

void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
        if (adev->virt.vf2pf_update_interval_ms != 0) {
                DRM_INFO("clean up the vf2pf work item\n");
                cancel_delayed_work_sync(&adev->virt.vf2pf_work);
                adev->virt.vf2pf_update_interval_ms = 0;
        }
}
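
/*
 * Illustrative sketch (assumed usage, not driver code): the exchange is
 * symmetric, so teardown mirrors setup, and cancel_delayed_work_sync()
 * above guarantees no vf2pf update is still in flight when the interval
 * is zeroed:
 *
 *        amdgpu_virt_init_data_exchange(adev);        // schedules vf2pf_work
 *        // ... normal SR-IOV operation ...
 *        amdgpu_virt_fini_data_exchange(adev);        // cancels it synchronously
 */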

void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
        adev->virt.fw_reserve.p_pf2vf = NULL;
        adev->virt.fw_reserve.p_vf2pf = NULL;
        adev->virt.vf2pf_update_interval_ms = 0;

        if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
                DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!");
        } else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
                /* go through this logic in ip_init and reset to init the workqueue */
                amdgpu_virt_exchange_data(adev);

                INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
                schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
        } else if (adev->bios != NULL) {
                /* go through this logic in the early init stage to get necessary flags, e.g. rlcg_acc related */
                adev->virt.fw_reserve.p_pf2vf =
                        (struct amd_sriov_msg_pf2vf_info_header *)
                        (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));

                amdgpu_virt_read_pf2vf_data(adev);
        }
}

void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
{
        uint64_t bp_block_offset = 0;
        uint32_t bp_block_size = 0;
        struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;

        if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
                if (adev->mman.fw_vram_usage_va) {
                        adev->virt.fw_reserve.p_pf2vf =
                                (struct amd_sriov_msg_pf2vf_info_header *)
                                (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
                        adev->virt.fw_reserve.p_vf2pf =
                                (struct amd_sriov_msg_vf2pf_info_header *)
                                (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
                } else if (adev->mman.drv_vram_usage_va) {
                        adev->virt.fw_reserve.p_pf2vf =
                                (struct amd_sriov_msg_pf2vf_info_header *)
                                (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
                        adev->virt.fw_reserve.p_vf2pf =
                                (struct amd_sriov_msg_vf2pf_info_header *)
                                (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
                }

                amdgpu_virt_read_pf2vf_data(adev);
                amdgpu_virt_write_vf2pf_data(adev);

                /* bad page handling for version 2 */
                if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
                        pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;

                        bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
                                ((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
                        bp_block_size = pf2vf_v2->bp_block_size;

                        if (bp_block_size && !adev->virt.ras_init_done)
                                amdgpu_virt_init_ras_err_handler_data(adev);

                        if (adev->virt.ras_init_done)
                                amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
                }
        }
}

void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
        uint32_t reg;

        switch (adev->asic_type) {
        case CHIP_TONGA:
        case CHIP_FIJI:
                reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
                break;
        case CHIP_VEGA10:
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI12:
        case CHIP_SIENNA_CICHLID:
        case CHIP_ARCTURUS:
        case CHIP_ALDEBARAN:
        case CHIP_IP_DISCOVERY:
                reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
                break;
        default: /* other chips don't support SRIOV */
                reg = 0;
                break;
        }

        if (reg & 1)
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

        if (reg & 0x80000000)
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

        if (!reg) {
                /* passthrough mode excludes sriov mode */
                if (is_virtual_machine() && !xen_initial_domain())
                        adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
        }

        if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
                /* VF MMIO access (except mailbox range) from CPU
                 * will be blocked during sriov runtime
                 */
                adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;

        /* we have the ability to check now */
        if (amdgpu_sriov_vf(adev)) {
                switch (adev->asic_type) {
                case CHIP_TONGA:
                case CHIP_FIJI:
                        vi_set_virt_ops(adev);
                        break;
                case CHIP_VEGA10:
                        soc15_set_virt_ops(adev);
#ifdef CONFIG_X86
                        /* don't send GPU_INIT_DATA with MS_HYPERV */
                        if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
#endif
                                /* send a dummy GPU_INIT_DATA request to host on vega10 */
                                amdgpu_virt_request_init_data(adev);
                        break;
                case CHIP_VEGA20:
                case CHIP_ARCTURUS:
                case CHIP_ALDEBARAN:
                        soc15_set_virt_ops(adev);
                        break;
                case CHIP_NAVI10:
                case CHIP_NAVI12:
                case CHIP_SIENNA_CICHLID:
                case CHIP_IP_DISCOVERY:
                        nv_set_virt_ops(adev);
                        /* try to send a GPU_INIT_DATA request to the host */
                        amdgpu_virt_request_init_data(adev);
                        break;
                default: /* other chips don't support SRIOV */
                        DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
                        break;
                }
        }
}
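
/*
 * Illustrative sketch (not driver code): after detection, the rest of the
 * driver keys off the caps bits set above, usually via helpers such as
 * amdgpu_sriov_vf(). A minimal direct check of the bitmask would be:
 *
 *        if (adev->virt.caps & AMDGPU_SRIOV_CAPS_IS_VF)
 *                ;        // running as an SR-IOV virtual function
 *        if (adev->virt.caps & AMDGPU_PASSTHROUGH_MODE)
 *                ;        // whole GPU passed through to the VM
 */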

static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
{
        return amdgpu_sriov_is_debug(adev);
}

static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
{
        return amdgpu_sriov_is_normal(adev);
}

int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
{
        if (!amdgpu_sriov_vf(adev) ||
            amdgpu_virt_access_debugfs_is_kiq(adev))
                return 0;

        if (amdgpu_virt_access_debugfs_is_mmio(adev))
                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        else
                return -EPERM;

        return 0;
}

void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
{
        if (amdgpu_sriov_vf(adev))
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
}

enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
{
        enum amdgpu_sriov_vf_mode mode;

        if (amdgpu_sriov_vf(adev)) {
                if (amdgpu_sriov_is_pp_one_vf(adev))
                        mode = SRIOV_VF_MODE_ONE_VF;
                else
                        mode = SRIOV_VF_MODE_MULTI_VF;
        } else {
                mode = SRIOV_VF_MODE_BARE_METAL;
        }

        return mode;
}
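
/*
 * Illustrative sketch (assumed caller, not driver code): setup code can
 * dispatch on the mode returned above, e.g. for power-management wiring:
 *
 *        switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
 *        case SRIOV_VF_MODE_ONE_VF:        // single VF owns the GPU
 *        case SRIOV_VF_MODE_MULTI_VF:      // GPU shared between VFs
 *        case SRIOV_VF_MODE_BARE_METAL:    // not an SR-IOV VF at all
 *        default:
 *                break;
 *        }
 */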

bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
{
        switch (adev->ip_versions[MP0_HWIP][0]) {
        case IP_VERSION(13, 0, 0):
                /* no vf autoload, white list */
                if (ucode_id == AMDGPU_UCODE_ID_VCN1 ||
                    ucode_id == AMDGPU_UCODE_ID_VCN)
                        return false;
                else
                        return true;
        case IP_VERSION(13, 0, 10):
                /* white list */
                if (ucode_id == AMDGPU_UCODE_ID_CAP
                || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP
                || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME
                || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC
                || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK
                || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK
                || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK
                || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK
                || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK
                || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK
                || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK
                || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK
                || ucode_id == AMDGPU_UCODE_ID_CP_MES
                || ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA
                || ucode_id == AMDGPU_UCODE_ID_CP_MES1
                || ucode_id == AMDGPU_UCODE_ID_CP_MES1_DATA
                || ucode_id == AMDGPU_UCODE_ID_VCN1
                || ucode_id == AMDGPU_UCODE_ID_VCN)
                        return false;
                else
                        return true;
        default:
                /* legacy black list */
                if (ucode_id == AMDGPU_UCODE_ID_SDMA0
                    || ucode_id == AMDGPU_UCODE_ID_SDMA1
                    || ucode_id == AMDGPU_UCODE_ID_SDMA2
                    || ucode_id == AMDGPU_UCODE_ID_SDMA3
                    || ucode_id == AMDGPU_UCODE_ID_SDMA4
                    || ucode_id == AMDGPU_UCODE_ID_SDMA5
                    || ucode_id == AMDGPU_UCODE_ID_SDMA6
                    || ucode_id == AMDGPU_UCODE_ID_SDMA7
                    || ucode_id == AMDGPU_UCODE_ID_RLC_G
                    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
                    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
                    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
                    || ucode_id == AMDGPU_UCODE_ID_SMC)
                        return true;
                else
                        return false;
        }
}

void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
                        struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
                        struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
{
        uint32_t i;

        if (!adev->virt.is_mm_bw_enabled)
                return;

        if (encode) {
                for (i = 0; i < encode_array_size; i++) {
                        encode[i].max_width = adev->virt.encode_max_dimension_pixels;
                        encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
                        if (encode[i].max_width > 0)
                                encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width;
                        else
                                encode[i].max_height = 0;
                }
        }

        if (decode) {
                for (i = 0; i < decode_array_size; i++) {
                        decode[i].max_width = adev->virt.decode_max_dimension_pixels;
                        decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
                        if (decode[i].max_width > 0)
                                decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width;
                        else
                                decode[i].max_height = 0;
                }
        }
}

static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
                                                 u32 acc_flags, u32 hwip,
                                                 bool write, u32 *rlcg_flag)
{
        bool ret = false;

        switch (hwip) {
        case GC_HWIP:
                if (amdgpu_sriov_reg_indirect_gc(adev)) {
                        *rlcg_flag =
                                write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
                        ret = true;
                /* only in the new version are AMDGPU_REGS_NO_KIQ and
                 * AMDGPU_REGS_RLC enabled simultaneously
                 */
                } else if ((acc_flags & AMDGPU_REGS_RLC) &&
                                !(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
                        *rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
                        ret = true;
                }
                break;
        case MMHUB_HWIP:
                if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
                    (acc_flags & AMDGPU_REGS_RLC) && write) {
                        *rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
                        ret = true;
                }
                break;
        default:
                break;
        }
        return ret;
}

static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
{
        struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
        uint32_t timeout = 50000;
        uint32_t i, tmp;
        uint32_t ret = 0;
        void *scratch_reg0;
        void *scratch_reg1;
        void *scratch_reg2;
        void *scratch_reg3;
        void *spare_int = NULL;

        if (!adev->gfx.rlc.rlcg_reg_access_supported) {
                dev_err(adev->dev,
                        "indirect registers access through rlcg is not available\n");
                return 0;
        }

        reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
        scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
        scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
        scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
        scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
        if (reg_access_ctrl->spare_int)
                spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;

        if (offset == reg_access_ctrl->grbm_cntl) {
                /* if the target reg offset is grbm_cntl, write to scratch_reg2 */
                writel(v, scratch_reg2);
                if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
                        writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
        } else if (offset == reg_access_ctrl->grbm_idx) {
                /* if the target reg offset is grbm_idx, write to scratch_reg3 */
                writel(v, scratch_reg3);
                if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
                        writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
        } else {
                /*
                 * SCRATCH_REG0         = read/write value
                 * SCRATCH_REG1[30:28]  = command
                 * SCRATCH_REG1[19:0]   = address in dword
                 * SCRATCH_REG1[26:24]  = error reporting
                 */
                writel(v, scratch_reg0);
                writel((offset | flag), scratch_reg1);
                if (reg_access_ctrl->spare_int)
                        writel(1, spare_int);

                for (i = 0; i < timeout; i++) {
                        tmp = readl(scratch_reg1);
                        if (!(tmp & AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK))
                                break;
                        udelay(10);
                }

                if (i >= timeout) {
                        if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
                                if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
                                        dev_err(adev->dev,
                                                "vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
                                } else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
                                        dev_err(adev->dev,
                                                "wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
                                } else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
                                        dev_err(adev->dev,
                                                "register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
                                } else {
                                        dev_err(adev->dev,
                                                "unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
                                }
                        } else {
                                dev_err(adev->dev,
                                        "timeout: rlcg failed to program reg: 0x%05x\n", offset);
                        }
                }
        }

        ret = readl(scratch_reg0);
        return ret;
}

void amdgpu_sriov_wreg(struct amdgpu_device *adev,
                       u32 offset, u32 value,
                       u32 acc_flags, u32 hwip)
{
        u32 rlcg_flag;

        if (!amdgpu_sriov_runtime(adev) &&
            amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
                amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag);
                return;
        }

        if (acc_flags & AMDGPU_REGS_NO_KIQ)
                WREG32_NO_KIQ(offset, value);
        else
                WREG32(offset, value);
}

u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
                      u32 offset, u32 acc_flags, u32 hwip)
{
        u32 rlcg_flag;

        if (!amdgpu_sriov_runtime(adev) &&
            amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
                return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag);

        if (acc_flags & AMDGPU_REGS_NO_KIQ)
                return RREG32_NO_KIQ(offset);
        else
                return RREG32(offset);
}
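
/*
 * Illustrative sketch (reg/val and the flag values are assumptions, not
 * driver code): callers use the two wrappers above like WREG32/RREG32;
 * when the VF is not in SR-IOV runtime and the hwip/flags ask for it, the
 * access is routed through the RLCG scratch-register protocol, otherwise
 * it falls back to plain (or NO_KIQ) MMIO:
 *
 *        amdgpu_sriov_wreg(adev, reg, val, 0, GC_HWIP);
 *        val = amdgpu_sriov_rreg(adev, reg, AMDGPU_REGS_NO_KIQ, GC_HWIP);
 */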