r = amdgpu_device_enable_mgpu_fan_boost();
if (r)
DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+
+ /* set to low pstate by default */
+ amdgpu_xgmi_set_pstate(adev, 0);
+
}
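Note: amdgpu_get_xgmi_hive() below initializes hive->pstate to -1, so this first amdgpu_xgmi_set_pstate(adev, 0) call does not hit the hive->pstate == pstate early-return and actually requests the low pstate; its return value is simply ignored on this late-init path.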
static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
/* If the mappings are cleared or filled */
bool cleared;
+
+ /* If the BO is mapped through a peer device over XGMI */
+ bool is_xgmi;
};
struct amdgpu_bo {
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
+#include "amdgpu_xgmi.h"
/**
* DOC: GPUVM
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);
+ if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev))) {
+ bo_va->is_xgmi = true;
+ mutex_lock(&adev->vm_manager.lock_pstate);
+ /* Power up XGMI since it may now be used */
+ if (++adev->vm_manager.xgmi_map_counter == 1)
+ amdgpu_xgmi_set_pstate(adev, 1);
+ mutex_unlock(&adev->vm_manager.lock_pstate);
+ }
+
return bo_va;
}
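The bo && guard matters because amdgpu_vm_bo_add() can be called with a NULL bo (e.g. for PRT mappings), in which case there is no backing device whose hive could be compared.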
}
dma_fence_put(bo_va->last_pt_update);
+
+ if (bo && bo_va->is_xgmi) {
+ mutex_lock(&adev->vm_manager.lock_pstate);
+ /* Power down XGMI when the last mapping is gone */
+ if (--adev->vm_manager.xgmi_map_counter == 0)
+ amdgpu_xgmi_set_pstate(adev, 0);
+ mutex_unlock(&adev->vm_manager.lock_pstate);
+ }
+
kfree(bo_va);
}
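Taken together, the add and rmv hunks implement a reference count: the first bo_va that maps memory across XGMI raises the link to the high pstate, and destroying the last one drops it back to low. lock_pstate makes the counter transition and the pstate request atomic with respect to concurrent map/unmap paths.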
idr_init(&adev->vm_manager.pasid_idr);
spin_lock_init(&adev->vm_manager.pasid_lock);
+
+ adev->vm_manager.xgmi_map_counter = 0;
+ mutex_init(&adev->vm_manager.lock_pstate);
}
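Only the init side appears in this diff. A minimal sketch of the matching cleanup, assuming it goes into amdgpu_vm_manager_fini() (not part of this patch):

	/* hypothetical counterpart in amdgpu_vm_manager_fini() */
	mutex_destroy(&adev->vm_manager.lock_pstate);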
/**
*/
struct idr pasid_idr;
spinlock_t pasid_lock;
+
+ /* counter of memory mappings that go through xgmi */
+ uint32_t xgmi_map_counter;
+ struct mutex lock_pstate;
};
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
if (lock)
mutex_lock(&tmp->hive_lock);
-
+ tmp->pstate = -1;
mutex_unlock(&xgmi_mutex);
return tmp;
}
+int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
+{
+ int ret = 0;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+
+ if (!hive)
+ return 0;
+
+ if (hive->pstate == pstate)
+ return 0;
+
+ /* TODO: send the message to SMU for the pstate change */
+ return ret;
+}
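A sketch of how the TODO might be completed, assuming a hypothetical smu_set_xgmi_pstate() helper that this patch does not provide; caching the new value in hive->pstate is an assumption implied by the early-return check above:

	/* hypothetical: ask the SMU to switch the link pstate and
	 * remember it so the early-return check stays accurate
	 */
	ret = smu_set_xgmi_pstate(adev, pstate);
	if (!ret)
		hive->pstate = pstate;
	return ret;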
+
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
int ret = -EINVAL;
struct kobject *kobj;
struct device_attribute dev_attr;
struct amdgpu_device *adev;
+ int pstate; /* 0 -- low, 1 -- high, -1 -- unknown */
};
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
+int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
+
+static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
+ struct amdgpu_device *bo_adev)
+{
+ return (adev != bo_adev &&
+ adev->gmc.xgmi.hive_id &&
+ adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
+}
#endif
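The amdgpu_xgmi_same_hive() predicate is deliberately three-fold: adev != bo_adev excludes a device's own BOs (local access never crosses a link), a non-zero hive_id confirms the device is part of an XGMI hive at all, and the final equality check confirms both devices sit on the same hive. Only mappings that pass all three checks count toward xgmi_map_counter.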