// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "kfd_mqd_manager.h"
#include "amdgpu_amdkfd.h"
#include "kfd_device_queue_manager.h"

/* Mapping queue priority to pipe priority, indexed by queue priority */
int pipe_priority_map[] = {
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_LOW,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_HIGH,
	KFD_PIPE_PRIORITY_CS_HIGH,
	KFD_PIPE_PRIORITY_CS_HIGH,
	KFD_PIPE_PRIORITY_CS_HIGH,
	KFD_PIPE_PRIORITY_CS_HIGH
};

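/* Illustrative lookup (added commentary, not from the original source):
 * queue priorities 0-6 select KFD_PIPE_PRIORITY_CS_LOW, 7-10 select
 * KFD_PIPE_PRIORITY_CS_MEDIUM and 11-15 select KFD_PIPE_PRIORITY_CS_HIGH,
 * so a caller would typically do something like:
 *
 *	int pipe_prio = pipe_priority_map[q->priority];
 */
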
struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev, struct queue_properties *q)
{
	struct kfd_mem_obj *mqd_mem_obj = NULL;

	mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!mqd_mem_obj)
		return NULL;

	mqd_mem_obj->gtt_mem = dev->dqm->hiq_sdma_mqd.gtt_mem;
	mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr;
	mqd_mem_obj->cpu_ptr = dev->dqm->hiq_sdma_mqd.cpu_ptr;

	return mqd_mem_obj;
}

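/* Note (added commentary): the HIQ MQD returned above aliases the dqm's
 * preallocated hiq_sdma_mqd buffer; only the kfd_mem_obj wrapper is
 * allocated per queue. The matching free path, free_mqd_hiq_sdma() below,
 * therefore releases the wrapper but never the shared GTT allocation.
 */
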
struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_node *dev,
					struct queue_properties *q)
{
	struct kfd_mem_obj *mqd_mem_obj = NULL;
	uint64_t offset;

	mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!mqd_mem_obj)
		return NULL;

	offset = (q->sdma_engine_id *
		dev->kfd->device_info.num_sdma_queues_per_engine +
		q->sdma_queue_id) *
		dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size;

	offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size *
		  NUM_XCC(dev->xcc_mask);

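	/* Worked example (added commentary, layout assumed from the offset
	 * math above): with one XCC, 8 SDMA queues per engine and a 256-byte
	 * SDMA MQD, engine 1 queue 2 lands at
	 * hiq_mqd_size + (1 * 8 + 2) * 256 bytes into the shared buffer.
	 */
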
	mqd_mem_obj->gtt_mem = (void *)((uint64_t)dev->dqm->hiq_sdma_mqd.gtt_mem
				+ offset);
	mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset;
	mqd_mem_obj->cpu_ptr = (uint32_t *)((uint64_t)
				dev->dqm->hiq_sdma_mqd.cpu_ptr + offset);

	return mqd_mem_obj;
}

void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
			struct kfd_mem_obj *mqd_mem_obj)
{
	WARN_ON(!mqd_mem_obj->gtt_mem);
	kfree(mqd_mem_obj);
}

void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
		const uint32_t *cu_mask, uint32_t cu_mask_count,
		uint32_t *se_mask)
{
	struct kfd_cu_info cu_info;
	uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
	bool wgp_mode_req = KFD_GC_VERSION(mm->dev) >= IP_VERSION(10, 0, 0);
	uint32_t en_mask = wgp_mode_req ? 0x3 : 0x1;
	int i, se, sh, cu, cu_bitmap_sh_mul, inc = wgp_mode_req ? 2 : 1;

	amdgpu_amdkfd_get_cu_info(mm->dev->adev, &cu_info);

	if (cu_mask_count > cu_info.cu_active_number)
		cu_mask_count = cu_info.cu_active_number;

	/* Exceeding these bounds corrupts the stack and indicates a coding error.
	 * Returning with no CUs enabled will hang the queue, which should be
	 * attention grabbing.
	 */
	if (cu_info.num_shader_engines > KFD_MAX_NUM_SE) {
		pr_err("Exceeded KFD_MAX_NUM_SE, chip reports %d\n", cu_info.num_shader_engines);
		return;
	}

	if (cu_info.num_shader_arrays_per_engine > KFD_MAX_NUM_SH_PER_SE) {
		pr_err("Exceeded KFD_MAX_NUM_SH, chip reports %d\n",
			cu_info.num_shader_arrays_per_engine * cu_info.num_shader_engines);
		return;
	}

	cu_bitmap_sh_mul = (KFD_GC_VERSION(mm->dev) >= IP_VERSION(11, 0, 0) &&
			    KFD_GC_VERSION(mm->dev) < IP_VERSION(12, 0, 0)) ? 2 : 1;

	/* Count active CUs per SH.
	 *
	 * Some CUs in an SH may be disabled. HW expects disabled CUs to be
	 * represented in the high bits of each SH's enable mask (the upper and lower
	 * 16 bits of se_mask) and will take care of the actual distribution of
	 * disabled CUs within each SH automatically.
	 * Each half of se_mask must be filled only on bits 0-cu_per_sh[se][sh]-1.
	 *
	 * See note on Arcturus cu_bitmap layout in gfx_v9_0_get_cu_info.
	 * See note on GFX11 cu_bitmap layout in gfx_v11_0_get_cu_info.
	 */
	for (se = 0; se < cu_info.num_shader_engines; se++)
		for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
			cu_per_sh[se][sh] = hweight32(
				cu_info.cu_bitmap[se % 4][sh + (se / 4) * cu_bitmap_sh_mul]);

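	/* Example (added commentary): hweight32() counts the set bits, so a
	 * cu_bitmap word of 0x000000ff yields cu_per_sh[se][sh] = 8 active
	 * CUs in that shader array.
	 */
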
	/* Symmetrically map cu_mask to all SEs & SHs:
	 * se_mask programs up to 2 SH in the upper and lower 16 bits.
	 *
	 * Assuming 1 SH/SE, 4 SEs:
	 * cu_mask[0] bit0 -> se_mask[0] bit0
	 * cu_mask[0] bit1 -> se_mask[1] bit0
	 * ...
	 * cu_mask[0] bit4 -> se_mask[0] bit1
	 * ...
	 *
	 * Assuming 2 SH/SE, 4 SEs:
	 * cu_mask[0] bit0 -> se_mask[0] bit0 (SE0,SH0,CU0)
	 * cu_mask[0] bit1 -> se_mask[1] bit0 (SE1,SH0,CU0)
	 * ...
	 * cu_mask[0] bit4 -> se_mask[0] bit16 (SE0,SH1,CU0)
	 * cu_mask[0] bit5 -> se_mask[1] bit16 (SE1,SH1,CU0)
	 * ...
	 * cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1)
	 * ...
	 *
	 * First ensure all CUs are disabled, then enable user specified CUs.
	 */
	for (i = 0; i < cu_info.num_shader_engines; i++)
		se_mask[i] = 0;

	i = 0;
	for (cu = 0; cu < 16; cu += inc) {
		for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) {
			for (se = 0; se < cu_info.num_shader_engines; se++) {
				if (cu_per_sh[se][sh] > cu) {
					if (cu_mask[i / 32] & (en_mask << (i % 32)))
						se_mask[se] |= en_mask << (cu + sh * 16);
					i += inc;
					if (i == cu_mask_count)
						return;
				}
			}
		}
	}
}

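/* Usage sketch (added commentary, not from the original source): a
 * GFX-version-specific mqd_manager would typically expand the user CU mask
 * into per-SE masks and program them into the MQD. The MQD field names
 * below follow the v9 layout but are assumptions here:
 *
 *	uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
 *
 *	mqd_symmetrically_map_cu_mask(mm, cu_mask, cu_mask_count, se_mask);
 *	m->compute_static_thread_mgmt_se0 = se_mask[0];
 *	m->compute_static_thread_mgmt_se1 = se_mask[1];
 *	m->compute_static_thread_mgmt_se2 = se_mask[2];
 *	m->compute_static_thread_mgmt_se3 = se_mask[3];
 */
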
int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
					      queue_id, p->doorbell_off, 0);
}

int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd,
			enum kfd_preempt_type type, unsigned int timeout,
			uint32_t pipe_id, uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, mqd, type, timeout,
						pipe_id, queue_id, 0);
}

void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
			struct kfd_mem_obj *mqd_mem_obj)
{
	if (mqd_mem_obj->gtt_mem) {
		amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, mqd_mem_obj->gtt_mem);
		kfree(mqd_mem_obj);
	} else {
		kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
	}
}

bool kfd_is_occupied_cp(struct mqd_manager *mm, void *mqd,
			uint64_t queue_address, uint32_t pipe_id,
			uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->adev, queue_address,
						pipe_id, queue_id, 0);
}

int kfd_load_mqd_sdma(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
						(uint32_t __user *)p->write_ptr,
						mms);
}

/*
 * The preempt type is ignored here because there is only one way
 * to preempt an SDMA queue.
 */
int kfd_destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
			enum kfd_preempt_type type,
			unsigned int timeout, uint32_t pipe_id,
			uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
}

bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd,
			uint64_t queue_address, uint32_t pipe_id,
			uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
}

uint64_t kfd_hiq_mqd_stride(struct kfd_node *dev)
{
	return dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
}

void kfd_get_hiq_xcc_mqd(struct kfd_node *dev, struct kfd_mem_obj *mqd_mem_obj,
			uint32_t virtual_xcc_id)
{
	uint64_t offset;

	offset = kfd_hiq_mqd_stride(dev) * virtual_xcc_id;

	mqd_mem_obj->gtt_mem = (virtual_xcc_id == 0) ?
			dev->dqm->hiq_sdma_mqd.gtt_mem : NULL;
	mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset;
	mqd_mem_obj->cpu_ptr = (uint32_t *)((uintptr_t)
				dev->dqm->hiq_sdma_mqd.cpu_ptr + offset);
}

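/* Worked example (added commentary): with a 512-byte HIQ MQD,
 * virtual_xcc_id 2 resolves to an offset of 1024 bytes into the shared
 * hiq_sdma_mqd buffer. gtt_mem is reported only for XCC 0 so that the
 * shared allocation is not freed once per XCC.
 */
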
uint64_t kfd_mqd_stride(struct mqd_manager *mm,
			struct queue_properties *q)
{
	return mm->mqd_size;
}