int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
adev->gfx.mec.num_queue_per_pipe,
adev->gfx.num_compute_rings);
- int num_xcd = (adev->gfx.num_xcd > 1) ? adev->gfx.num_xcd : 1;
+ int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
if (multipipe_policy) {
/* policy: make queues evenly cross all pipes on MEC1 only
* for multiple xcc, just use the original policy for simplicity */
- for (j = 0; j < num_xcd; j++) {
+ for (j = 0; j < num_xcc; j++) {
for (i = 0; i < max_queues_per_mec; i++) {
pipe = i % adev->gfx.mec.num_pipe_per_mec;
queue = (i / adev->gfx.mec.num_pipe_per_mec) %
}
} else {
/* policy: amdgpu owns all queues in the given pipe */
- for (j = 0; j < num_xcd; j++) {
+ for (j = 0; j < num_xcc; j++) {
for (i = 0; i < max_queues_per_mec; ++i)
set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
}
}
- for (j = 0; j < num_xcd; j++) {
+ for (j = 0; j < num_xcc; j++) {
dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
}
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
enum amdgpu_gfx_partition mode;
- int ret;
+ int ret = 0, num_xcc;
- if (adev->gfx.num_xcd % 2 != 0)
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ if (num_xcc % 2 != 0)
return -EINVAL;
if (!strncasecmp("SPX", buf, strlen("SPX"))) {
mode = AMDGPU_SPX_PARTITION_MODE;
} else if (!strncasecmp("DPX", buf, strlen("DPX"))) {
- if (adev->gfx.num_xcd != 4 || adev->gfx.num_xcd != 8)
+ if (num_xcc != 4 && num_xcc != 8)
return -EINVAL;
mode = AMDGPU_DPX_PARTITION_MODE;
} else if (!strncasecmp("TPX", buf, strlen("TPX"))) {
- if (adev->gfx.num_xcd != 6)
+ if (num_xcc != 6)
return -EINVAL;
mode = AMDGPU_TPX_PARTITION_MODE;
} else if (!strncasecmp("QPX", buf, strlen("QPX"))) {
- if (adev->gfx.num_xcd != 8)
+ if (num_xcc != 8)
return -EINVAL;
mode = AMDGPU_QPX_PARTITION_MODE;
} else if (!strncasecmp("CPX", buf, strlen("CPX"))) {
char *supported_partition;
/* TBD */
- switch (adev->gfx.num_xcd) {
+ switch (NUM_XCC(adev->gfx.xcc_mask)) {
case 8:
supported_partition = "SPX, DPX, QPX, CPX";
break;
static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
- int i;
- for (i = 0; i < adev->gfx.num_xcd; i++)
+ int i, num_xcc;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++)
adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
}
static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
{
- int i;
+ int i, num_xcc;
- for (i = 2; i < adev->gfx.num_xcd; i++)
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 2; i < num_xcc; i++)
WREG32_SOC15(GC, i, regGRBM_MCM_ADDR, 0x4);
}
static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
{
- int r, i;
+ int r, i, num_xcc;
u32 *hpd;
const __le32 *fw_data;
unsigned fw_size;
const struct gfx_firmware_header_v1_0 *mec_hdr;
- for (i = 0; i < adev->gfx.num_xcd; i++)
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++)
bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
AMDGPU_MAX_COMPUTE_QUEUES);
enum amdgpu_gfx_partition mode)
{
u32 tmp = 0;
- int num_xcc_per_partition, i;
+ int num_xcc_per_partition, i, num_xcc;
if (mode == adev->gfx.partition_mode)
return mode;
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
switch (mode) {
case AMDGPU_SPX_PARTITION_MODE:
- num_xcc_per_partition = adev->gfx.num_xcd;
+ num_xcc_per_partition = num_xcc;
break;
case AMDGPU_DPX_PARTITION_MODE:
- num_xcc_per_partition = adev->gfx.num_xcd / 2;
+ num_xcc_per_partition = num_xcc / 2;
break;
case AMDGPU_TPX_PARTITION_MODE:
- num_xcc_per_partition = adev->gfx.num_xcd / 3;
+ num_xcc_per_partition = num_xcc / 3;
break;
case AMDGPU_QPX_PARTITION_MODE:
- num_xcc_per_partition = adev->gfx.num_xcd / 4;
+ num_xcc_per_partition = num_xcc / 4;
break;
case AMDGPU_CPX_PARTITION_MODE:
num_xcc_per_partition = 1;
* Stop user queues and threads, and make sure GPU is empty of work.
*/
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ for (i = 0; i < num_xcc; i++) {
tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
num_xcc_per_partition);
tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
static int gfx_v9_4_3_sw_init(void *handle)
{
- int i, j, k, r, ring_id, xcc_id;
+ int i, j, k, r, ring_id, xcc_id, num_xcc;
struct amdgpu_kiq *kiq;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+
/* EOP Event */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
if (r)
/* set up the compute queues - allocate horizontally across pipes */
ring_id = 0;
- for (xcc_id = 0; xcc_id < adev->gfx.num_xcd; xcc_id++) {
-
+ for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
static int gfx_v9_4_3_sw_fini(void *handle)
{
- int i;
+ int i, num_xcc;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- for (i = 0; i < adev->gfx.num_compute_rings *
- adev->gfx.num_xcd; i++)
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ for (i = 0; i < num_xcc; i++) {
amdgpu_gfx_mqd_sw_fini(adev, i);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
amdgpu_gfx_kiq_fini(adev, i);
static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
{
u32 tmp;
- int i, j;
+ int i, j, num_xcc;
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
WREG32_FIELD15_PREREG(GC, i, GRBM_CNTL, READ_TIMEOUT, 0xff);
gfx_v9_4_3_setup_rb(adev, i);
}
/* where to put LDS, scratch, GPUVM in FSA64 space */
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
- for (j = 0; j < adev->gfx.num_xcd; j++) {
+ for (j = 0; j < num_xcc; j++) {
soc15_grbm_select(adev, 0, 0, 0, i, j);
/* CP and shaders */
if (i == 0) {
mutex_unlock(&adev->srbm_mutex);
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ for (i = 0; i < num_xcc; i++) {
gfx_v9_4_3_init_compute_vmid(adev, i);
gfx_v9_4_3_init_gds_vmid(adev, i);
}
static void gfx_v9_4_3_program_xcc_id(struct amdgpu_device *adev, int xcc_id)
{
uint32_t tmp = 0;
+ int num_xcc;
- switch (adev->gfx.num_xcd) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ switch (num_xcc) {
/* directly config VIRTUAL_XCC_ID to 0 for 1-XCC */
case 1:
WREG32_SOC15(GC, xcc_id, regCP_HYP_XCP_CTL, 0x8);
static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
{
- int i;
+ int i, num_xcc;
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
WREG32_FIELD15_PREREG(GC, i, RLC_CNTL, RLC_ENABLE_F32, 0);
gfx_v9_4_3_enable_gui_idle_interrupt(adev, false, i);
gfx_v9_4_3_wait_for_rlc_serdes(adev, i);
static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
{
- int i;
+ int i, num_xcc;
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
WREG32_FIELD15_PREREG(GC, i, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
udelay(50);
WREG32_FIELD15_PREREG(GC, i, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
#ifdef AMDGPU_RLC_DEBUG_RETRY
u32 rlc_ucode_ver;
#endif
- int i;
+ int i, num_xcc;
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
WREG32_FIELD15_PREREG(GC, i, RLC_CNTL, RLC_ENABLE_F32, 1);
udelay(50);
static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
{
- int r, i;
+ int r, i, num_xcc;
adev->gfx.rlc.funcs->stop(adev);
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
/* disable CG */
WREG32_SOC15(GC, i, regRLC_CGCG_CGLS_CTRL, 0);
static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
{
- int r, i, j;
+ int r, i, j, num_xcc;
struct amdgpu_ring *ring;
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
gfx_v9_4_3_enable_gui_idle_interrupt(adev, false, i);
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
static int gfx_v9_4_3_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int i;
+ int i, num_xcc;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
if (amdgpu_gfx_disable_kcq(adev, i))
DRM_ERROR("XCD %d KCQ disable failed\n", i);
static bool gfx_v9_4_3_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int i;
+ int i, num_xcc;
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
if (REG_GET_FIELD(RREG32_SOC15(GC, i, regGRBM_STATUS),
GRBM_STATUS, GUI_ACTIVE))
return false;
static int gfx_v9_4_3_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int num_xcc;
- /* hardcode in emulation phase */
- adev->gfx.num_xcd = 1;
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
adev->gfx.partition_mode = amdgpu_user_partt_mode;
/* calculate the num_xcc_in_xcp for the partition mode*/
switch (amdgpu_user_partt_mode) {
case AMDGPU_SPX_PARTITION_MODE:
- adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd;
+ adev->gfx.num_xcc_per_xcp = num_xcc;
break;
case AMDGPU_DPX_PARTITION_MODE:
- adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd / 2;
+ adev->gfx.num_xcc_per_xcp = num_xcc / 2;
break;
case AMDGPU_TPX_PARTITION_MODE:
- adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd / 3;
+ adev->gfx.num_xcc_per_xcp = num_xcc / 3;
break;
case AMDGPU_QPX_PARTITION_MODE:
- adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd / 4;
+ adev->gfx.num_xcc_per_xcp = num_xcc / 4;
break;
case AMDGPU_CPX_PARTITION_MODE:
adev->gfx.num_xcc_per_xcp = 1;
break;
default:
- adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd;
+ adev->gfx.num_xcc_per_xcp = num_xcc;
break;
}
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int i;
+ int i, num_xcc;
if (amdgpu_sriov_vf(adev))
return 0;
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 4, 3):
- for (i = 0; i < adev->gfx.num_xcd; i++)
+ for (i = 0; i < num_xcc; i++)
gfx_v9_4_3_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE, i);
break;
unsigned type,
enum amdgpu_interrupt_state state)
{
- int i;
+ int i, num_xcc;
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- for (i = 0; i < adev->gfx.num_xcd; i++)
+ for (i = 0; i < num_xcc; i++)
WREG32_FIELD15_PREREG(GC, i, CP_INT_CNTL_RING0,
PRIV_REG_INT_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
unsigned type,
enum amdgpu_interrupt_state state)
{
- int i;
+ int i, num_xcc;
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
- for (i = 0; i < adev->gfx.num_xcd; i++)
+ for (i = 0; i < num_xcc; i++)
WREG32_FIELD15_PREREG(GC, i, CP_INT_CNTL_RING0,
PRIV_INSTR_INT_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
unsigned type,
enum amdgpu_interrupt_state state)
{
- int i;
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ int i, num_xcc;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
switch (type) {
case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 0, state, i);
/* Per-queue interrupt is supported for MEC starting from VI.
* The interrupt can only be enabled/disabled per pipe instead of per queue.
*/
+
if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
amdgpu_fence_process(ring);
}
static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
{
- int i, j;
+ int i, j, num_xcc;
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq;
for (j = 0; j < adev->gfx.num_compute_rings; j++)
uint64_t page_table_base)
{
struct amdgpu_vmhub *hub;
- int i;
+ int i, num_xcc;
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
hub = &adev->vmhub[AMDGPU_GFXHUB(i)];
WREG32_SOC15_OFFSET(GC, i,
regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
hub->ctx_addr_distance * vmid,
upper_32_bits(page_table_base));
+
}
}
static void gfxhub_v1_2_init_gart_aperture_regs(struct amdgpu_device *adev)
{
uint64_t pt_base;
- int i;
+ int i, num_xcc;
if (adev->gmc.pdb0_bo)
pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
/* If use GART for FB translation, vmid0 page table covers both
* vram and system memory (gart)
*/
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
if (adev->gmc.pdb0_bo) {
WREG32_SOC15(GC, i,
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
{
uint64_t value;
uint32_t tmp;
- int i;
+ int i, num_xcc;
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
/* Program the AGP BAR */
WREG32_SOC15_RLC(GC, i, regMC_VM_AGP_BASE, 0);
WREG32_SOC15_RLC(GC, i, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
static void gfxhub_v1_2_init_tlb_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
- int i;
+ int i, num_xcc;
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
/* Setup TLB control */
tmp = RREG32_SOC15(GC, i, regMC_VM_MX_L1_TLB_CNTL);
static void gfxhub_v1_2_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
- int i;
+ int i, num_xcc;
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
/* Setup L2 cache */
tmp = RREG32_SOC15(GC, i, regVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
static void gfxhub_v1_2_enable_system_domain(struct amdgpu_device *adev)
{
uint32_t tmp;
- int i;
+ int i, num_xcc;
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
tmp = RREG32_SOC15(GC, i, regVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
static void gfxhub_v1_2_disable_identity_aperture(struct amdgpu_device *adev)
{
- int i;
+ int i, num_xcc;
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
WREG32_SOC15(GC, i,
regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
0XFFFFFFFF);
struct amdgpu_vmhub *hub;
unsigned num_level, block_size;
uint32_t tmp;
- int i, j;
+ int i, j, num_xcc;
num_level = adev->vm_manager.num_level;
block_size = adev->vm_manager.block_size;
else
block_size -= 9;
- for (j = 0; j < adev->gfx.num_xcd; j++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (j = 0; j < num_xcc; j++) {
hub = &adev->vmhub[AMDGPU_GFXHUB(j)];
for (i = 0; i <= 14; i++) {
tmp = RREG32_SOC15_OFFSET(GC, j, regVM_CONTEXT1_CNTL, i);
static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub;
- unsigned i, j;
+ unsigned i, j, num_xcc;
- for (j = 0; j < adev->gfx.num_xcd; j++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (j = 0; j < num_xcc; j++) {
hub = &adev->vmhub[AMDGPU_GFXHUB(j)];
+
for (i = 0 ; i < 18; ++i) {
WREG32_SOC15_OFFSET(GC, j, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
i * hub->eng_addr_distance, 0xffffffff);
static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev)
{
- int i;
+ int i, num_xcc;
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
if (amdgpu_sriov_vf(adev)) {
/*
* MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
{
struct amdgpu_vmhub *hub;
u32 tmp;
- u32 i, j;
+ u32 i, j, num_xcc;
- for (j = 0; j < adev->gfx.num_xcd; j++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (j = 0; j < num_xcc; j++) {
hub = &adev->vmhub[AMDGPU_GFXHUB(j)];
/* Disable all tables */
for (i = 0; i < 16; i++)
bool value)
{
u32 tmp;
- int i;
+ int i, num_xcc;
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
tmp = RREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
static void gfxhub_v1_2_init(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub;
- int i;
+ int i, num_xcc;
- for (i = 0; i < adev->gfx.num_xcd; i++) {
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ for (i = 0; i < num_xcc; i++) {
hub = &adev->vmhub[AMDGPU_GFXHUB(i)];
hub->ctx0_ptb_addr_lo32 =