From: Yong Zhao
Date: Wed, 5 Feb 2020 21:53:37 +0000 (-0500)
Subject: drm/amdkfd: Delete unnecessary unmap queue package submissions
X-Git-Tag: v5.10.7~2823^2~26^2~20
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=c7637c95abebc811d2479a5b924859025def544a;p=platform%2Fkernel%2Flinux-rpi.git

drm/amdkfd: Delete unnecessary unmap queue package submissions

The previous way of using the SDMA queue count to infer whether we should
unmap SDMA engines was buggy. The reason it did not cause issues is that
the MEC firmware unmaps all queues (CP + SDMA) when an unmap queue package
for the compute engine is received. Because of that, only one unmap queue
package is needed, instead of one unmap queue package for the CP and one
for each SDMA engine, which results in much simpler driver code.

Signed-off-by: Yong Zhao
Acked-by: Alex Deucher
Reviewed-by: Felix Kuehling
Signed-off-by: Alex Deucher
---

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 958275db3f55..692abfd2088a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -109,6 +109,11 @@ static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
 	return dqm->dev->device_info->num_xgmi_sdma_engines;
 }
 
+static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
+{
+	return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm);
+}
+
 unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
 {
 	return dqm->dev->device_info->num_sdma_engines
@@ -375,11 +380,6 @@ add_queue_to_list:
 	if (q->properties.is_active)
 		increment_queue_count(dqm, q->properties.type);
 
-	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
-		dqm->sdma_queue_count++;
-	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
-		dqm->xgmi_sdma_queue_count++;
-
 	/*
 	 * Unconditionally increment this counter, regardless of the queue's
 	 * type or whether the queue is active.
@@ -460,15 +460,13 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
 	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 			q->properties.type)];
 
-	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
+	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
 		deallocate_hqd(dqm, q);
-	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
-		dqm->sdma_queue_count--;
+	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 		deallocate_sdma_queue(dqm, q);
-	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
-		dqm->xgmi_sdma_queue_count--;
+	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 		deallocate_sdma_queue(dqm, q);
-	} else {
+	else {
 		pr_debug("q->properties.type %d is invalid\n",
 				q->properties.type);
 		return -EINVAL;
@@ -915,8 +913,6 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
 	INIT_LIST_HEAD(&dqm->queues);
 	dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
 	dqm->active_cp_queue_count = 0;
-	dqm->sdma_queue_count = 0;
-	dqm->xgmi_sdma_queue_count = 0;
 
 	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
 		int pipe_offset = pipe * get_queues_per_pipe(dqm);
@@ -981,8 +977,11 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
 	int bit;
 
 	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
-		if (dqm->sdma_bitmap == 0)
+		if (dqm->sdma_bitmap == 0) {
+			pr_err("No more SDMA queue to allocate\n");
 			return -ENOMEM;
+		}
+
 		bit = __ffs64(dqm->sdma_bitmap);
 		dqm->sdma_bitmap &= ~(1ULL << bit);
 		q->sdma_id = bit;
@@ -991,8 +990,10 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
 		q->properties.sdma_queue_id = q->sdma_id /
 				get_num_sdma_engines(dqm);
 	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
-		if (dqm->xgmi_sdma_bitmap == 0)
+		if (dqm->xgmi_sdma_bitmap == 0) {
+			pr_err("No more XGMI SDMA queue to allocate\n");
 			return -ENOMEM;
+		}
 		bit = __ffs64(dqm->xgmi_sdma_bitmap);
 		dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
 		q->sdma_id = bit;
@@ -1081,8 +1082,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
 	INIT_LIST_HEAD(&dqm->queues);
 	dqm->active_queue_count = dqm->processes_count = 0;
 	dqm->active_cp_queue_count = 0;
-	dqm->sdma_queue_count = 0;
-	dqm->xgmi_sdma_queue_count = 0;
+
 	dqm->active_runlist = false;
 	dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
 	dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
@@ -1254,11 +1254,6 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	list_add(&q->list, &qpd->queues_list);
 	qpd->queue_count++;
 
-	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
-		dqm->sdma_queue_count++;
-	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
-		dqm->xgmi_sdma_queue_count++;
-
 	if (q->properties.is_active) {
 		increment_queue_count(dqm, q->properties.type);
 
@@ -1315,20 +1310,6 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
 	return 0;
 }
 
-static int unmap_sdma_queues(struct device_queue_manager *dqm)
-{
-	int i, retval = 0;
-
-	for (i = 0; i < dqm->dev->device_info->num_sdma_engines +
-		dqm->dev->device_info->num_xgmi_sdma_engines; i++) {
-		retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
-			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i);
-		if (retval)
-			return retval;
-	}
-	return retval;
-}
-
 /* dqm->lock mutex has to be locked before calling this function */
 static int map_queues_cpsch(struct device_queue_manager *dqm)
 {
@@ -1366,12 +1347,6 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 	if (!dqm->active_runlist)
 		return retval;
 
-	pr_debug("Before destroying queues, sdma queue count is : %u, xgmi sdma queue count is : %u\n",
-		dqm->sdma_queue_count, dqm->xgmi_sdma_queue_count);
-
-	if (dqm->sdma_queue_count > 0 || dqm->xgmi_sdma_queue_count)
-		unmap_sdma_queues(dqm);
-
 	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
 			filter, filter_param, false, 0);
 	if (retval)
@@ -1444,13 +1419,10 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 
 	deallocate_doorbell(qpd, q);
 
-	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
-		dqm->sdma_queue_count--;
+	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 		deallocate_sdma_queue(dqm, q);
-	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
-		dqm->xgmi_sdma_queue_count--;
+	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 		deallocate_sdma_queue(dqm, q);
-	}
 
 	list_del(&q->list);
 	qpd->queue_count--;
@@ -1673,13 +1645,10 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
 
 	/* Clear all user mode queues */
 	list_for_each_entry(q, &qpd->queues_list, list) {
-		if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
-			dqm->sdma_queue_count--;
+		if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 			deallocate_sdma_queue(dqm, q);
-		} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
-			dqm->xgmi_sdma_queue_count--;
+		else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 			deallocate_sdma_queue(dqm, q);
-		}
 
 		if (q->properties.is_active)
 			decrement_queue_count(dqm, q->properties.type);
@@ -1759,8 +1728,7 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
 	struct kfd_dev *dev = dqm->dev;
 	struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
 	uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
-		(dev->device_info->num_sdma_engines +
-		dev->device_info->num_xgmi_sdma_engines) *
+		get_num_all_sdma_engines(dqm) *
 		dev->device_info->num_sdma_queues_per_engine +
 		dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
 
@@ -2012,8 +1980,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
 		}
 	}
 
-	for (pipe = 0; pipe < get_num_sdma_engines(dqm) +
-			get_num_xgmi_sdma_engines(dqm); pipe++) {
+	for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
 		for (queue = 0;
 		     queue < dqm->dev->device_info->num_sdma_queues_per_engine;
 		     queue++) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 05e0afc04cd9..50d919f814e9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -182,8 +182,6 @@ struct device_queue_manager {
 	unsigned int		processes_count;
 	unsigned int		active_queue_count;
 	unsigned int		active_cp_queue_count;
-	unsigned int		sdma_queue_count;
-	unsigned int		xgmi_sdma_queue_count;
 	unsigned int		total_queue_count;
 	unsigned int		next_pipe_to_allocate;
 	unsigned int		*allocated_queues;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 95002278990e..084c35f55d59 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -241,16 +241,12 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 	switch (type) {
 	case KFD_QUEUE_TYPE_SDMA:
 	case KFD_QUEUE_TYPE_SDMA_XGMI:
-		if ((type == KFD_QUEUE_TYPE_SDMA && dev->dqm->sdma_queue_count
-			>= get_num_sdma_queues(dev->dqm)) ||
-			(type == KFD_QUEUE_TYPE_SDMA_XGMI &&
-			dev->dqm->xgmi_sdma_queue_count
-			>= get_num_xgmi_sdma_queues(dev->dqm))) {
-			pr_debug("Over-subscription is not allowed for SDMA.\n");
-			retval = -EPERM;
-			goto err_create_queue;
-		}
-
+		/* SDMA queues are always allocated statically no matter
+		 * which scheduler mode is used. We also do not need to
+		 * check whether a SDMA queue can be allocated here, because
+		 * allocate_sdma_queue() in create_queue() has the
+		 * corresponding check logic.
+		 */
 		retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
 		if (retval != 0)
 			goto err_create_queue;
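
For illustration only, and not part of the patch: the sketch below condenses what the unmap_queues_cpsch() hunk above looks like after this change, to show the point of the commit message. A single unmap-queues packet sent for the compute engine now covers CP and SDMA queues alike, so the removed per-SDMA-engine loop (unmap_sdma_queues()) is no longer needed. Fence waiting, error handling and runlist bookkeeping are omitted, and the _sketch suffix is only there to set it apart from the real function.

/* Condensed sketch of the post-patch flow; not the verbatim kernel code. */
static int unmap_queues_cpsch_sketch(struct device_queue_manager *dqm,
				     enum kfd_unmap_queues_filter filter,
				     uint32_t filter_param)
{
	if (!dqm->active_runlist)
		return 0;

	/*
	 * One unmap queue package for the compute engine is enough:
	 * MEC firmware unmaps all queues (CP + SDMA), so no
	 * per-SDMA-engine packets are submitted anymore.
	 */
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
				   filter, filter_param, false, 0);
}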