drm/amdgpu: make mcbp a per device setting
author Alex Deucher <alexander.deucher@amd.com>
Fri, 16 Jun 2023 20:49:04 +0000 (16:49 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
Fri, 30 Jun 2023 17:12:14 +0000 (13:12 -0400)
Make MCBP (mid command buffer preemption) a per-device flag so it can be
selectively enabled on certain devices rather than being controlled only by
the global module parameter.  No intended functional change.

Reviewed-and-tested-by: Jiadong Zhu <Jiadong.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index eda0a59..a700fe0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2552,7 +2552,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
                        adev->ip_blocks[i].status.hw = true;
 
                        /* right after GMC hw init, we create CSA */
-                       if (amdgpu_mcbp) {
+                       if (adev->gfx.mcbp) {
                                r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
                                                               AMDGPU_GEM_DOMAIN_VRAM |
                                                               AMDGPU_GEM_DOMAIN_GTT,
@@ -3673,6 +3673,18 @@ static const struct attribute *amdgpu_dev_attributes[] = {
        NULL
 };
 
+static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
+{
+       if (amdgpu_mcbp == 1)
+               adev->gfx.mcbp = true;
+
+       if (amdgpu_sriov_vf(adev))
+               adev->gfx.mcbp = true;
+
+       if (adev->gfx.mcbp)
+               DRM_INFO("MCBP is enabled\n");
+}
+
 /**
  * amdgpu_device_init - initialize the driver
  *
@@ -3824,9 +3836,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
        DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
 
-       if (amdgpu_mcbp)
-               DRM_INFO("MCBP is enabled\n");
-
        /*
         * Reset domain needs to be present early, before XGMI hive discovered
         * (if any) and intitialized to use reset sem and in_gpu reset flag
@@ -3852,6 +3861,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        if (r)
                return r;
 
+       amdgpu_device_set_mcbp(adev);
+
        /* Get rid of things like offb */
        r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
        if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index ce0f7a8..a4ff515 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -434,6 +434,7 @@ struct amdgpu_gfx {
        uint16_t                        xcc_mask;
        uint32_t                        num_xcc_per_xcp;
        struct mutex                    partition_mutex;
+       bool                            mcbp; /* mid command buffer preemption */
 };
 
 struct amdgpu_gfx_ras_reg_entry {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index e3531aa..cca5a49 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -805,7 +805,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                dev_info->ids_flags = 0;
                if (adev->flags & AMD_IS_APU)
                        dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
-               if (amdgpu_mcbp)
+               if (adev->gfx.mcbp)
                        dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
                if (amdgpu_is_tmz(adev))
                        dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
@@ -1247,7 +1247,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                goto error_vm;
        }
 
-       if (amdgpu_mcbp) {
+       if (adev->gfx.mcbp) {
                uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
 
                r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
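
With the amdgpu_kms.c changes above, the AMDGPU_IDS_FLAGS_PREEMPTION bit
returned by the INFO ioctl now reflects the per-device flag rather than the
module parameter.  A minimal userspace sketch that reads the bit through
libdrm_amdgpu (the render node path is an assumption; error handling is
trimmed):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

int main(void)
{
	amdgpu_device_handle dev;
	struct drm_amdgpu_info_device info = {0};
	uint32_t major, minor;
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* assumed render node */

	if (fd < 0 || amdgpu_device_initialize(fd, &major, &minor, &dev))
		return 1;

	/* AMDGPU_INFO_DEV_INFO fills drm_amdgpu_info_device, including ids_flags */
	if (!amdgpu_query_info(dev, AMDGPU_INFO_DEV_INFO, sizeof(info), &info))
		printf("MCBP/preemption: %s\n",
		       (info.ids_flags & AMDGPU_IDS_FLAGS_PREEMPTION) ?
		       "enabled" : "disabled");

	amdgpu_device_deinitialize(dev);
	return 0;
}

Build with something like: gcc query_mcbp.c $(pkg-config --cflags --libs libdrm_amdgpu).
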
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 78ec342..dacf281 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -72,7 +72,7 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
        int r;
 
        /* don't enable OS preemption on SDMA under SRIOV */
-       if (amdgpu_sriov_vf(adev) || vmid == 0 || !amdgpu_mcbp)
+       if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp)
                return 0;
 
        if (ring->is_mes_queue) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 25b4d7f..41aa853 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -66,9 +66,6 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
        adev->cg_flags = 0;
        adev->pg_flags = 0;
 
-       /* enable mcbp for sriov */
-       amdgpu_mcbp = 1;
-
        /* Reduce kcq number to 2 to reduce latency */
        if (amdgpu_num_kcq == -1)
                amdgpu_num_kcq = 2;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index be984f8..44af802 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -8307,7 +8307,7 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 
        control |= ib->length_dw | (vmid << 24);
 
-       if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
+       if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
                control |= INDIRECT_BUFFER_PRE_ENB(1);
 
                if (flags & AMDGPU_IB_PREEMPTED)
@@ -8482,7 +8482,7 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
 {
        uint32_t dw2 = 0;
 
-       if (amdgpu_mcbp)
+       if (ring->adev->gfx.mcbp)
                gfx_v10_0_ring_emit_ce_meta(ring,
                                    (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 690e121..3a7af59 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -5311,7 +5311,7 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 
        control |= ib->length_dw | (vmid << 24);
 
-       if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
+       if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
                control |= INDIRECT_BUFFER_PRE_ENB(1);
 
                if (flags & AMDGPU_IB_PREEMPTED)
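
With the flag now stored in struct amdgpu_gfx, the enablement policy can be
refined per device without touching any of the callers converted above.  As a
purely illustrative sketch of where this could go (not part of this commit;
the -1 "auto" default for amdgpu_mcbp and the GFX IP version cutoff are
assumptions), amdgpu_device_set_mcbp() could pick a hardware-based default
while still honoring an explicit module parameter:

static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
{
	if (amdgpu_mcbp == 1) {
		/* explicitly enabled on the command line */
		adev->gfx.mcbp = true;
	} else if (amdgpu_mcbp == 0) {
		/* explicitly disabled on the command line */
		adev->gfx.mcbp = false;
	} else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) {
		/* hypothetical "auto" policy: default on for newer GFX IPs */
		adev->gfx.mcbp = true;
	}

	/* SR-IOV always runs with MCBP, mirroring the check in this commit */
	if (amdgpu_sriov_vf(adev))
		adev->gfx.mcbp = true;

	if (adev->gfx.mcbp)
		DRM_INFO("MCBP is enabled\n");
}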