drm/amdgpu: Rework reset domain to be refcounted.
authorAndrey Grodzovsky <andrey.grodzovsky@amd.com>
Fri, 21 Jan 2022 22:23:32 +0000 (17:23 -0500)
committerAndrey Grodzovsky <andrey.grodzovsky@amd.com>
Wed, 9 Feb 2022 17:17:09 +0000 (12:17 -0500)
The reset domain now contains the register access semaphore
and so needs to be present as long as each device
in a hive needs it; therefore it cannot be bound to the XGMI
hive life cycle.
Address this by making the reset domain refcounted and pointed to
by each member of the hive and by the hive itself.

v4:

Fix crash on boot with XGMI hive by adding a type to reset_domain.
XGMI will only create a new reset_domain if the previous one was of single
device type, meaning it's the first boot. Otherwise it will take a
refcount to the existing reset_domain from the amdgpu device.

Add a wrapper around reset_domain->refcount get/put
and a wrapper around send to reset wq (Lijo)

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Link: https://www.spinics.net/lists/amd-gfx/msg74121.html
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c

index 540a38fe5cd607401d3bd03349e12707e75d3296..cb9764513df8d1e43b6bf0fd7cf4f4d49fed280b 100644 (file)
@@ -813,9 +813,7 @@ struct amd_powerplay {
 #define AMDGPU_RESET_MAGIC_NUM 64
 #define AMDGPU_MAX_DF_PERFMONS 4
 #define AMDGPU_PRODUCT_NAME_LEN 64
-struct amdgpu_reset_domain {
-       struct workqueue_struct *wq;
-};
+struct amdgpu_reset_domain;
 
 struct amdgpu_device {
        struct device                   *dev;
@@ -1104,7 +1102,7 @@ struct amdgpu_device {
        uint32_t                        ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE];
 
        bool                            ram_is_direct_mapped;
-       struct amdgpu_reset_domain      reset_domain;
+       struct amdgpu_reset_domain      *reset_domain;
 };
 
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
index e3c0ec684a85115920737ca88592b5be86f0b880..d61bc0a0457c3fbce82722f4e011fa789900a8c0 100644 (file)
@@ -2316,7 +2316,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
 
                r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
                                   ring->num_hw_submission, amdgpu_job_hang_limit,
-                                  timeout, adev->reset_domain.wq, ring->sched_score, ring->name);
+                                  timeout, adev->reset_domain->wq, ring->sched_score, ring->name);
                if (r) {
                        DRM_ERROR("Failed to create scheduler on ring %s.\n",
                                  ring->name);
@@ -2439,24 +2439,22 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
        if (r)
                goto init_failed;
 
+       /**
+        * In case of XGMI grab extra reference for reset domain for this device
+        */
        if (adev->gmc.xgmi.num_physical_nodes > 1) {
-               struct amdgpu_hive_info *hive;
+               if (amdgpu_xgmi_add_device(adev) == 0) {
+                       struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
 
-               amdgpu_xgmi_add_device(adev);
+                       if (!hive->reset_domain ||
+                           !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
+                               r = -ENOENT;
+                               goto init_failed;
+                       }
 
-               hive = amdgpu_get_xgmi_hive(adev);
-               if (!hive || !hive->reset_domain.wq) {
-                       DRM_ERROR("Failed to obtain reset domain info for XGMI hive:%llx", hive->hive_id);
-                       r = -EINVAL;
-                       goto init_failed;
-               }
-
-               adev->reset_domain.wq = hive->reset_domain.wq;
-       } else {
-               adev->reset_domain.wq = alloc_ordered_workqueue("amdgpu-reset-dev", 0);
-               if (!adev->reset_domain.wq) {
-                       r = -ENOMEM;
-                       goto init_failed;
+                       /* Drop the early temporary reset domain we created for device */
+                       amdgpu_reset_put_reset_domain(adev->reset_domain);
+                       adev->reset_domain = hive->reset_domain;
                }
        }
 
@@ -3640,6 +3638,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                return r;
        }
 
+       /*
+        * Reset domain needs to be present early, before XGMI hive discovered
+        * (if any) and intitialized to use reset sem and in_gpu reset flag
+        * early on during init.
+        */
+       adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE ,"amdgpu-reset-dev");
+       if (!adev->reset_domain)
+               return -ENOMEM;
+
        /* early init functions */
        r = amdgpu_device_ip_early_init(adev);
        if (r)
@@ -4016,6 +4023,9 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
        if (adev->mman.discovery_bin)
                amdgpu_discovery_fini(adev);
 
+       amdgpu_reset_put_reset_domain(adev->reset_domain);
+       adev->reset_domain = NULL;
+
        kfree(adev->pci_state);
 
 }
@@ -5241,7 +5251,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
        INIT_WORK(&work.base, amdgpu_device_queue_gpu_recover_work);
 
-       if (!queue_work(adev->reset_domain.wq, &work.base))
+       if (!amdgpu_reset_domain_schedule(adev->reset_domain, &work.base))
                return -EAGAIN;
 
        flush_work(&work.base);
index 02afd4115675f5e04e2109ba5dff3fec4556a1d8..91864947063fb86644abee108ec3a0f54a415f27 100644 (file)
@@ -96,3 +96,43 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
        return reset_handler->restore_hwcontext(adev->reset_cntl,
                                                reset_context);
 }
+
+
+void amdgpu_reset_destroy_reset_domain(struct kref *ref)
+{
+       struct amdgpu_reset_domain *reset_domain = container_of(ref,
+                                                               struct amdgpu_reset_domain,
+                                                               refcount);
+       if (reset_domain->wq)
+               destroy_workqueue(reset_domain->wq);
+
+       kvfree(reset_domain);
+}
+
+struct amdgpu_reset_domain *amdgpu_reset_create_reset_domain(enum amdgpu_reset_domain_type type,
+                                                            char *wq_name)
+{
+       struct amdgpu_reset_domain *reset_domain;
+
+       reset_domain = kvzalloc(sizeof(struct amdgpu_reset_domain), GFP_KERNEL);
+       if (!reset_domain) {
+               DRM_ERROR("Failed to allocate amdgpu_reset_domain!");
+               return NULL;
+       }
+
+       reset_domain->type = type;
+       kref_init(&reset_domain->refcount);
+
+       reset_domain->wq = create_singlethread_workqueue(wq_name);
+       if (!reset_domain->wq) {
+               DRM_ERROR("Failed to allocate wq for amdgpu_reset_domain!");
+               amdgpu_reset_put_reset_domain(reset_domain);
+               return NULL;
+
+       }
+
+       return reset_domain;
+}
+
+
+
index e00d38d9160a40dcec96c09ec08139a5a5f2557d..cc625e441fa0abc016412ff0c7e9e7b49071112c 100644 (file)
@@ -70,6 +70,19 @@ struct amdgpu_reset_control {
        void (*async_reset)(struct work_struct *work);
 };
 
+
+enum amdgpu_reset_domain_type {
+       SINGLE_DEVICE,
+       XGMI_HIVE
+};
+
+struct amdgpu_reset_domain {
+       struct kref refcount;
+       struct workqueue_struct *wq;
+       enum amdgpu_reset_domain_type type;
+};
+
+
 int amdgpu_reset_init(struct amdgpu_device *adev);
 int amdgpu_reset_fini(struct amdgpu_device *adev);
 
@@ -82,4 +95,26 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
 int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
                             struct amdgpu_reset_handler *handler);
 
+struct amdgpu_reset_domain *amdgpu_reset_create_reset_domain(enum amdgpu_reset_domain_type type,
+                                                            char *wq_name);
+
+void amdgpu_reset_destroy_reset_domain(struct kref *ref);
+
+static inline bool amdgpu_reset_get_reset_domain(struct amdgpu_reset_domain *domain)
+{
+       return kref_get_unless_zero(&domain->refcount) != 0;
+}
+
+static inline void amdgpu_reset_put_reset_domain(struct amdgpu_reset_domain *domain)
+{
+       kref_put(&domain->refcount, amdgpu_reset_destroy_reset_domain);
+}
+
+static inline bool amdgpu_reset_domain_schedule(struct amdgpu_reset_domain *domain,
+                                               struct work_struct *work)
+{
+       return queue_work(domain->wq, work);
+}
+
+
 #endif
index 89b682afe821715d8fdb038ff9cfece945bbd944..eb06322d2972c0d525ec24f720509c4eb6adf300 100644 (file)
@@ -32,6 +32,8 @@
 #include "wafl/wafl2_4_0_0_smn.h"
 #include "wafl/wafl2_4_0_0_sh_mask.h"
 
+#include "amdgpu_reset.h"
+
 #define smnPCS_XGMI23_PCS_ERROR_STATUS   0x11a01210
 #define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c
 #define smnPCS_GOPX1_PCS_ERROR_STATUS    0x12200210
@@ -227,6 +229,9 @@ static void amdgpu_xgmi_hive_release(struct kobject *kobj)
        struct amdgpu_hive_info *hive = container_of(
                kobj, struct amdgpu_hive_info, kobj);
 
+       amdgpu_reset_put_reset_domain(hive->reset_domain);
+       hive->reset_domain = NULL;
+
        mutex_destroy(&hive->hive_lock);
        kfree(hive);
 }
@@ -398,12 +403,24 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
                goto pro_end;
        }
 
-       hive->reset_domain.wq = alloc_ordered_workqueue("amdgpu-reset-hive", 0);
-       if (!hive->reset_domain.wq) {
-               dev_err(adev->dev, "XGMI: failed allocating wq for reset domain!\n");
-               kfree(hive);
-               hive = NULL;
-               goto pro_end;
+       /**
+        * Avoid recreating reset domain when hive is reconstructed for the case
+        * of reset the devices in the XGMI hive during probe for SRIOV
+        * See https://www.spinics.net/lists/amd-gfx/msg58836.html
+        */
+       if (adev->reset_domain->type != XGMI_HIVE) {
+               hive->reset_domain = amdgpu_reset_create_reset_domain(XGMI_HIVE, "amdgpu-reset-hive");
+                       if (!hive->reset_domain) {
+                               dev_err(adev->dev, "XGMI: failed initializing reset domain for xgmi hive\n");
+                               ret = -ENOMEM;
+                               kobject_put(&hive->kobj);
+                               kfree(hive);
+                               hive = NULL;
+                               goto pro_end;
+                       }
+       } else {
+               amdgpu_reset_get_reset_domain(adev->reset_domain);
+               hive->reset_domain = adev->reset_domain;
        }
 
        hive->hive_id = adev->gmc.xgmi.hive_id;
index 2f2ce53645a50443722091be7f69dfb69d54bcb0..dcaad22be492daf1012f5cf00308f9d1416a6a29 100644 (file)
@@ -42,7 +42,7 @@ struct amdgpu_hive_info {
                AMDGPU_XGMI_PSTATE_UNKNOWN
        } pstate;
 
-       struct amdgpu_reset_domain reset_domain;
+       struct amdgpu_reset_domain *reset_domain;
 };
 
 struct amdgpu_pcs_ras_field {
index 5869d51d8beecf5dd24eb55b3b31cd90bcf831aa..6740eef84ee19d9939755080d66fedc2d07b66b9 100644 (file)
@@ -32,6 +32,8 @@
 #include "soc15_common.h"
 #include "mxgpu_ai.h"
 
+#include "amdgpu_reset.h"
+
 static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
 {
        WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
@@ -308,8 +310,8 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
        switch (event) {
                case IDH_FLR_NOTIFICATION:
                if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
-                       WARN_ONCE(!queue_work(adev->reset_domain.wq,
-                                             &adev->virt.flr_work),
+                       WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+                                                               &adev->virt.flr_work),
                                  "Failed to queue work! at %s",
                                  __func__);
                break;
index 5728a6401d73fd44f2f207c4b6e68bae0fe08d56..e967d61c7134b9cdebbd8f76ebc18e160a4c9c86 100644 (file)
@@ -31,6 +31,8 @@
 #include "soc15_common.h"
 #include "mxgpu_nv.h"
 
+#include "amdgpu_reset.h"
+
 static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
 {
        WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
@@ -338,8 +340,8 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
        switch (event) {
        case IDH_FLR_NOTIFICATION:
                if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
-                       WARN_ONCE(!queue_work(adev->reset_domain.wq,
-                                             &adev->virt.flr_work),
+                       WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+                                  &adev->virt.flr_work),
                                  "Failed to queue work! at %s",
                                  __func__);
                break;
index 02290febfcf4f65294ac1370a73cd5597931f05a..531cfba759ddb92e8c0a796c6e86cde791512572 100644 (file)
@@ -42,6 +42,8 @@
 #include "smu/smu_7_1_3_d.h"
 #include "mxgpu_vi.h"
 
+#include "amdgpu_reset.h"
+
 /* VI golden setting */
 static const u32 xgpu_fiji_mgcg_cgcg_init[] = {
        mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
@@ -551,8 +553,8 @@ static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
 
                /* only handle FLR_NOTIFY now */
                if (!r && !amdgpu_in_reset(adev))
-                       WARN_ONCE(!queue_work(adev->reset_domain.wq,
-                                             &adev->virt.flr_work),
+                       WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+                                                               &adev->virt.flr_work),
                                  "Failed to queue work! at %s",
                                  __func__);
        }