select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
+ select INTERVAL_TREE
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
mutex_lock(&dev->mode_config.idr_mutex);
obj = idr_find(&dev->mode_config.crtc_idr, id);
- if (!obj || (type != DRM_MODE_OBJECT_ANY && obj->type != type) ||
- (obj->id != id))
+ if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
+ obj = NULL;
+ if (obj && obj->id != id)
+ obj = NULL;
+ /* don't leak out unref'd fb's */
+ if (obj && (obj->type == DRM_MODE_OBJECT_FB))
obj = NULL;
mutex_unlock(&dev->mode_config.idr_mutex);
* function.*/
WARN_ON(type == DRM_MODE_OBJECT_FB);
obj = _object_find(dev, id, type);
- /* don't leak out unref'd fb's */
- if (obj && (obj->type == DRM_MODE_OBJECT_FB))
- obj = NULL;
return obj;
}
EXPORT_SYMBOL(drm_mode_object_find);
DRM_DEBUG("open_count = %d\n", dev->open_count);
+ mutex_lock(&dev->struct_mutex);
+ list_del(&file_priv->lhead);
+ mutex_unlock(&dev->struct_mutex);
+
if (dev->driver->preclose)
dev->driver->preclose(dev, file_priv);
drm_master_put(&file_priv->master);
mutex_unlock(&dev->master_mutex);
- mutex_lock(&dev->struct_mutex);
- list_del(&file_priv->lhead);
- mutex_unlock(&dev->struct_mutex);
-
if (dev->driver->postclose)
dev->driver->postclose(dev, file_priv);
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
- /* FIXME: This access isn't protected by any locks. */
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
if (!intel_encoder->connectors_active)
return;
* we'll check the link status via the normal hot plug path later -
* but for short hpds we should check it now
*/
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
intel_dp_check_link_status(intel_dp);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
}
return false;
r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
- ci_dpm.o dce6_afmt.o radeon_vm.o
+ ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o
# add async DMA block
radeon-y += \
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
- /* get the native mode for LVDS */
- if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+ /* get the native mode for scaling */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_panel_mode_fixup(encoder, adjusted_mode);
-
- /* get the native mode for TV */
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
+ } else if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
if (tv_dac) {
if (tv_dac->tv_std == TV_STD_NTSC ||
else
radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
}
+ } else if (radeon_encoder->rmx_type != RMX_OFF) {
+ radeon_panel_mode_fixup(encoder, adjusted_mode);
}
if (ASIC_IS_DCE3(rdev) &&
if (radeon_connector->use_digital &&
(radeon_connector->audio == RADEON_AUDIO_ENABLE))
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
(radeon_connector->audio == RADEON_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
if (radeon_audio != 0) {
if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
(radeon_connector->audio == RADEON_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
} else if (radeon_audio != 0) {
if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
(radeon_connector->audio == RADEON_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
pi->vddc_leakage.count = 0;
pi->vddci_leakage.count = 0;
- if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+ for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
+ virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+ if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
+ continue;
+ if (vddc != 0 && vddc != virtual_voltage_id) {
+ pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
+ pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
+ pi->vddc_leakage.count++;
+ }
+ }
+ } else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
if (!rdev->smc_fw)
return -EINVAL;
- switch (rdev->family) {
- case CHIP_BONAIRE:
- ucode_start_address = BONAIRE_SMC_UCODE_START;
- ucode_size = BONAIRE_SMC_UCODE_SIZE;
- break;
- case CHIP_HAWAII:
- ucode_start_address = HAWAII_SMC_UCODE_START;
- ucode_size = HAWAII_SMC_UCODE_SIZE;
- break;
- default:
- DRM_ERROR("unknown asic in smc ucode loader\n");
- BUG();
+ if (rdev->new_fw) {
+ const struct smc_firmware_header_v1_0 *hdr =
+ (const struct smc_firmware_header_v1_0 *)rdev->smc_fw->data;
+
+ radeon_ucode_print_smc_hdr(&hdr->header);
+
+ ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+ src = (const u8 *)
+ (rdev->smc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ } else {
+ switch (rdev->family) {
+ case CHIP_BONAIRE:
+ ucode_start_address = BONAIRE_SMC_UCODE_START;
+ ucode_size = BONAIRE_SMC_UCODE_SIZE;
+ break;
+ case CHIP_HAWAII:
+ ucode_start_address = HAWAII_SMC_UCODE_START;
+ ucode_size = HAWAII_SMC_UCODE_SIZE;
+ break;
+ default:
+ DRM_ERROR("unknown asic in smc ucode loader\n");
+ BUG();
+ }
+
+ src = (const u8 *)rdev->smc_fw->data;
}
if (ucode_size & 3)
return -EINVAL;
- src = (const u8 *)rdev->smc_fw->data;
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(SMC_IND_INDEX_0, ucode_start_address);
WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
+
+MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
+MODULE_FIRMWARE("radeon/bonaire_me.bin");
+MODULE_FIRMWARE("radeon/bonaire_ce.bin");
+MODULE_FIRMWARE("radeon/bonaire_mec.bin");
+MODULE_FIRMWARE("radeon/bonaire_mc.bin");
+MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
+MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
+MODULE_FIRMWARE("radeon/bonaire_smc.bin");
+
MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
MODULE_FIRMWARE("radeon/HAWAII_me.bin");
MODULE_FIRMWARE("radeon/HAWAII_ce.bin");
MODULE_FIRMWARE("radeon/HAWAII_rlc.bin");
MODULE_FIRMWARE("radeon/HAWAII_sdma.bin");
MODULE_FIRMWARE("radeon/HAWAII_smc.bin");
+
+MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
+MODULE_FIRMWARE("radeon/hawaii_me.bin");
+MODULE_FIRMWARE("radeon/hawaii_ce.bin");
+MODULE_FIRMWARE("radeon/hawaii_mec.bin");
+MODULE_FIRMWARE("radeon/hawaii_mc.bin");
+MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
+MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
+MODULE_FIRMWARE("radeon/hawaii_smc.bin");
+
MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
MODULE_FIRMWARE("radeon/KAVERI_me.bin");
MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
MODULE_FIRMWARE("radeon/KAVERI_mec.bin");
MODULE_FIRMWARE("radeon/KAVERI_rlc.bin");
MODULE_FIRMWARE("radeon/KAVERI_sdma.bin");
+
+MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
+MODULE_FIRMWARE("radeon/kaveri_me.bin");
+MODULE_FIRMWARE("radeon/kaveri_ce.bin");
+MODULE_FIRMWARE("radeon/kaveri_mec.bin");
+MODULE_FIRMWARE("radeon/kaveri_mec2.bin");
+MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
+MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
+
MODULE_FIRMWARE("radeon/KABINI_pfp.bin");
MODULE_FIRMWARE("radeon/KABINI_me.bin");
MODULE_FIRMWARE("radeon/KABINI_ce.bin");
MODULE_FIRMWARE("radeon/KABINI_mec.bin");
MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
MODULE_FIRMWARE("radeon/KABINI_sdma.bin");
+
+MODULE_FIRMWARE("radeon/kabini_pfp.bin");
+MODULE_FIRMWARE("radeon/kabini_me.bin");
+MODULE_FIRMWARE("radeon/kabini_ce.bin");
+MODULE_FIRMWARE("radeon/kabini_mec.bin");
+MODULE_FIRMWARE("radeon/kabini_rlc.bin");
+MODULE_FIRMWARE("radeon/kabini_sdma.bin");
+
MODULE_FIRMWARE("radeon/MULLINS_pfp.bin");
MODULE_FIRMWARE("radeon/MULLINS_me.bin");
MODULE_FIRMWARE("radeon/MULLINS_ce.bin");
MODULE_FIRMWARE("radeon/MULLINS_rlc.bin");
MODULE_FIRMWARE("radeon/MULLINS_sdma.bin");
+MODULE_FIRMWARE("radeon/mullins_pfp.bin");
+MODULE_FIRMWARE("radeon/mullins_me.bin");
+MODULE_FIRMWARE("radeon/mullins_ce.bin");
+MODULE_FIRMWARE("radeon/mullins_mec.bin");
+MODULE_FIRMWARE("radeon/mullins_rlc.bin");
+MODULE_FIRMWARE("radeon/mullins_sdma.bin");
+
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
*/
int ci_mc_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
+ const __be32 *fw_data = NULL;
+ const __le32 *new_fw_data = NULL;
u32 running, blackout = 0;
- u32 *io_mc_regs;
+ u32 *io_mc_regs = NULL;
+ const __le32 *new_io_mc_regs = NULL;
int i, regs_size, ucode_size;
if (!rdev->mc_fw)
return -EINVAL;
- ucode_size = rdev->mc_fw->size / 4;
+ if (rdev->new_fw) {
+ const struct mc_firmware_header_v1_0 *hdr =
+ (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
- switch (rdev->family) {
- case CHIP_BONAIRE:
- io_mc_regs = (u32 *)&bonaire_io_mc_regs;
- regs_size = BONAIRE_IO_MC_REGS_SIZE;
- break;
- case CHIP_HAWAII:
- io_mc_regs = (u32 *)&hawaii_io_mc_regs;
- regs_size = HAWAII_IO_MC_REGS_SIZE;
- break;
- default:
- return -EINVAL;
+ radeon_ucode_print_mc_hdr(&hdr->header);
+
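+ /* the io debug table is stored as (index, data) dword pairs */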
+ regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
+ new_io_mc_regs = (const __le32 *)
+ (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ new_fw_data = (const __le32 *)
+ (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ } else {
+ ucode_size = rdev->mc_fw->size / 4;
+
+ switch (rdev->family) {
+ case CHIP_BONAIRE:
+ io_mc_regs = (u32 *)&bonaire_io_mc_regs;
+ regs_size = BONAIRE_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_HAWAII:
+ io_mc_regs = (u32 *)&hawaii_io_mc_regs;
+ regs_size = HAWAII_IO_MC_REGS_SIZE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ fw_data = (const __be32 *)rdev->mc_fw->data;
}
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
/* load mc io regs */
for (i = 0; i < regs_size; i++) {
- WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
- WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ if (rdev->new_fw) {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
+ WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
+ } else {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ }
}
/* load the MC ucode */
- fw_data = (const __be32 *)rdev->mc_fw->data;
- for (i = 0; i < ucode_size; i++)
- WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+ for (i = 0; i < ucode_size; i++) {
+ if (rdev->new_fw)
+ WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
+ else
+ WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+ }
/* put the engine back into the active state */
WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
static int cik_init_microcode(struct radeon_device *rdev)
{
const char *chip_name;
+ const char *new_chip_name;
size_t pfp_req_size, me_req_size, ce_req_size,
mec_req_size, rlc_req_size, mc_req_size = 0,
sdma_req_size, smc_req_size = 0, mc2_req_size = 0;
char fw_name[30];
+ int new_fw = 0;
int err;
+ int num_fw;
DRM_DEBUG("\n");
switch (rdev->family) {
case CHIP_BONAIRE:
chip_name = "BONAIRE";
+ new_chip_name = "bonaire";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
mc2_req_size = BONAIRE_MC2_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
+ num_fw = 8;
break;
case CHIP_HAWAII:
chip_name = "HAWAII";
+ new_chip_name = "hawaii";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
mc2_req_size = HAWAII_MC2_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4);
+ num_fw = 8;
break;
case CHIP_KAVERI:
chip_name = "KAVERI";
+ new_chip_name = "kaveri";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
mec_req_size = CIK_MEC_UCODE_SIZE * 4;
rlc_req_size = KV_RLC_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+ num_fw = 7;
break;
case CHIP_KABINI:
chip_name = "KABINI";
+ new_chip_name = "kabini";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
mec_req_size = CIK_MEC_UCODE_SIZE * 4;
rlc_req_size = KB_RLC_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+ num_fw = 6;
break;
case CHIP_MULLINS:
chip_name = "MULLINS";
+ new_chip_name = "mullins";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
mec_req_size = CIK_MEC_UCODE_SIZE * 4;
rlc_req_size = ML_RLC_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+ num_fw = 6;
break;
default: BUG();
}
- DRM_INFO("Loading %s Microcode\n", chip_name);
+ DRM_INFO("Loading %s Microcode\n", new_chip_name);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->pfp_fw->size != pfp_req_size) {
- printk(KERN_ERR
- "cik_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->pfp_fw->size, fw_name);
- err = -EINVAL;
- goto out;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->pfp_fw->size != pfp_req_size) {
+ printk(KERN_ERR
+ "cik_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->pfp_fw->size, fw_name);
+ err = -EINVAL;
+ goto out;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->pfp_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->me_fw->size != me_req_size) {
- printk(KERN_ERR
- "cik_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->me_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->me_fw->size != me_req_size) {
+ printk(KERN_ERR
+ "cik_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->me_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->me_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->ce_fw->size != ce_req_size) {
- printk(KERN_ERR
- "cik_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->ce_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->ce_fw->size != ce_req_size) {
+ printk(KERN_ERR
+ "cik_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->ce_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->ce_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", new_chip_name);
err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->mec_fw->size != mec_req_size) {
- printk(KERN_ERR
- "cik_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->mec_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
+ err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->mec_fw->size != mec_req_size) {
+ printk(KERN_ERR
+ "cik_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->mec_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->mec_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
+ }
+
+ if (rdev->family == CHIP_KAVERI) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", new_chip_name);
+ err = request_firmware(&rdev->mec2_fw, fw_name, rdev->dev);
+ if (err) {
+ goto out;
+ } else {
+ err = radeon_ucode_validate(rdev->mec2_fw);
+ if (err) {
+ goto out;
+ } else {
+ new_fw++;
+ }
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->rlc_fw->size != rlc_req_size) {
- printk(KERN_ERR
- "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
- rdev->rlc_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->rlc_fw->size != rlc_req_size) {
+ printk(KERN_ERR
+ "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->rlc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->rlc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", new_chip_name);
err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->sdma_fw->size != sdma_req_size) {
- printk(KERN_ERR
- "cik_sdma: Bogus length %zu in firmware \"%s\"\n",
- rdev->sdma_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+ err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->sdma_fw->size != sdma_req_size) {
+ printk(KERN_ERR
+ "cik_sdma: Bogus length %zu in firmware \"%s\"\n",
+ rdev->sdma_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->sdma_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
/* No SMC, MC ucode on APUs */
if (!(rdev->flags & RADEON_IS_IGP)) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
- if (err)
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ }
+ if ((rdev->mc_fw->size != mc_req_size) &&
+ (rdev->mc_fw->size != mc2_req_size)){
+ printk(KERN_ERR
+ "cik_mc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->mc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
+ } else {
+ err = radeon_ucode_validate(rdev->mc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
goto out;
+ } else {
+ new_fw++;
+ }
}
- if ((rdev->mc_fw->size != mc_req_size) &&
- (rdev->mc_fw->size != mc2_req_size)){
- printk(KERN_ERR
- "cik_mc: Bogus length %zu in firmware \"%s\"\n",
- rdev->mc_fw->size, fw_name);
- err = -EINVAL;
- }
- DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
- printk(KERN_ERR
- "smc: error loading firmware \"%s\"\n",
- fw_name);
- release_firmware(rdev->smc_fw);
- rdev->smc_fw = NULL;
- err = 0;
- } else if (rdev->smc_fw->size != smc_req_size) {
- printk(KERN_ERR
- "cik_smc: Bogus length %zu in firmware \"%s\"\n",
- rdev->smc_fw->size, fw_name);
- err = -EINVAL;
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ err = 0;
+ } else if (rdev->smc_fw->size != smc_req_size) {
+ printk(KERN_ERR
+ "cik_smc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->smc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->smc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
}
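+ /* either every image uses the new unified header format or none does; mixing old and new firmware is not supported */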
+ if (new_fw == 0) {
+ rdev->new_fw = false;
+ } else if (new_fw < num_fw) {
+ printk(KERN_ERR "ci_fw: mixing new and old firmware!\n");
+ err = -EINVAL;
+ } else {
+ rdev->new_fw = true;
+ }
+
out:
if (err) {
if (err != -EINVAL)
rdev->me_fw = NULL;
release_firmware(rdev->ce_fw);
rdev->ce_fw = NULL;
+ release_firmware(rdev->mec_fw);
+ rdev->mec_fw = NULL;
+ release_firmware(rdev->mec2_fw);
+ rdev->mec2_fw = NULL;
release_firmware(rdev->rlc_fw);
rdev->rlc_fw = NULL;
+ release_firmware(rdev->sdma_fw);
+ rdev->sdma_fw = NULL;
release_firmware(rdev->mc_fw);
rdev->mc_fw = NULL;
release_firmware(rdev->smc_fw);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
- /* HDP flush */
- cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
}
/**
radeon_ring_write(ring, upper_32_bits(addr));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
- /* HDP flush */
- cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
}
bool cik_semaphore_ring_emit(struct radeon_device *rdev,
*/
static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
int i;
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
cik_cp_gfx_enable(rdev, false);
- /* PFP */
- fw_data = (const __be32 *)rdev->pfp_fw->data;
- WREG32(CP_PFP_UCODE_ADDR, 0);
- for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
- WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_PFP_UCODE_ADDR, 0);
-
- /* CE */
- fw_data = (const __be32 *)rdev->ce_fw->data;
- WREG32(CP_CE_UCODE_ADDR, 0);
- for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
- WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_CE_UCODE_ADDR, 0);
-
- /* ME */
- fw_data = (const __be32 *)rdev->me_fw->data;
- WREG32(CP_ME_RAM_WADDR, 0);
- for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
- WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_ME_RAM_WADDR, 0);
+ if (rdev->new_fw) {
+ const struct gfx_firmware_header_v1_0 *pfp_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
+ const struct gfx_firmware_header_v1_0 *ce_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
+ const struct gfx_firmware_header_v1_0 *me_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
+ radeon_ucode_print_gfx_hdr(&ce_hdr->header);
+ radeon_ucode_print_gfx_hdr(&me_hdr->header);
+
+ /* PFP */
+ fw_data = (const __le32 *)
+ (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __le32 *)
+ (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+ fw_data = (const __le32 *)
+ (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
+ } else {
+ const __be32 *fw_data;
+
+ /* PFP */
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __be32 *)rdev->ce_fw->data;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
+ WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
+ }
WREG32(CP_PFP_UCODE_ADDR, 0);
WREG32(CP_CE_UCODE_ADDR, 0);
*/
static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
int i;
if (!rdev->mec_fw)
cik_cp_compute_enable(rdev, false);
- /* MEC1 */
- fw_data = (const __be32 *)rdev->mec_fw->data;
- WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
- for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
- WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
+ if (rdev->new_fw) {
+ const struct gfx_firmware_header_v1_0 *mec_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ radeon_ucode_print_gfx_hdr(&mec_hdr->header);
+
+ /* MEC1 */
+ fw_data = (const __le32 *)
+ (rdev->mec_fw->data + le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
- if (rdev->family == CHIP_KAVERI) {
/* MEC2 */
+ if (rdev->family == CHIP_KAVERI) {
+ const struct gfx_firmware_header_v1_0 *mec2_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data;
+
+ fw_data = (const __le32 *)
+ (rdev->mec2_fw->data +
+ le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ }
+ } else {
+ const __be32 *fw_data;
+
+ /* MEC1 */
fw_data = (const __be32 *)rdev->mec_fw->data;
- WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
- WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
+
+ if (rdev->family == CHIP_KAVERI) {
+ /* MEC2 */
+ fw_data = (const __be32 *)rdev->mec_fw->data;
+ WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
+ WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ }
}
return 0;
r = radeon_bo_create(rdev,
rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, NULL,
+ RADEON_GEM_DOMAIN_GTT, 0, NULL,
&rdev->mec.hpd_eop_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r);
r = radeon_bo_create(rdev,
sizeof(struct bonaire_mqd),
PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, NULL,
+ RADEON_GEM_DOMAIN_GTT, 0, NULL,
&rdev->ring[idx].mqd_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
struct radeon_ring *ring = &rdev->ring[ridx];
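+ /* only the GFX ring has a PFP engine; compute rings issue these writes through the ME */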
+ int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX);
if (vm == NULL)
return;
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
if (vm->id < 8) {
radeon_ring_write(ring,
radeon_ring_write(ring, 1 << vm->id);
/* compute doesn't have PFP */
- if (ridx == RADEON_RING_TYPE_GFX_INDEX) {
+ if (usepfp) {
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
static int cik_rlc_resume(struct radeon_device *rdev)
{
u32 i, size, tmp;
- const __be32 *fw_data;
if (!rdev->rlc_fw)
return -EINVAL;
- switch (rdev->family) {
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- default:
- size = BONAIRE_RLC_UCODE_SIZE;
- break;
- case CHIP_KAVERI:
- size = KV_RLC_UCODE_SIZE;
- break;
- case CHIP_KABINI:
- size = KB_RLC_UCODE_SIZE;
- break;
- case CHIP_MULLINS:
- size = ML_RLC_UCODE_SIZE;
- break;
- }
-
cik_rlc_stop(rdev);
/* disable CG */
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
- fw_data = (const __be32 *)rdev->rlc_fw->data;
+ if (rdev->new_fw) {
+ const struct rlc_firmware_header_v1_0 *hdr =
+ (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
+ const __le32 *fw_data = (const __le32 *)
+ (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ radeon_ucode_print_rlc_hdr(&hdr->header);
+
+ size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
WREG32(RLC_GPM_UCODE_ADDR, 0);
- for (i = 0; i < size; i++)
- WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(RLC_GPM_UCODE_ADDR, 0);
+ for (i = 0; i < size; i++)
+ WREG32(RLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(RLC_GPM_UCODE_ADDR, 0);
+ } else {
+ const __be32 *fw_data;
+
+ switch (rdev->family) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ default:
+ size = BONAIRE_RLC_UCODE_SIZE;
+ break;
+ case CHIP_KAVERI:
+ size = KV_RLC_UCODE_SIZE;
+ break;
+ case CHIP_KABINI:
+ size = KB_RLC_UCODE_SIZE;
+ break;
+ case CHIP_MULLINS:
+ size = ML_RLC_UCODE_SIZE;
+ break;
+ }
+
+ fw_data = (const __be32 *)rdev->rlc_fw->data;
+ WREG32(RLC_GPM_UCODE_ADDR, 0);
+ for (i = 0; i < size; i++)
+ WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(RLC_GPM_UCODE_ADDR, 0);
+ }
/* XXX - find out what chips support lbpw */
cik_enable_lbpw(rdev, false);
void cik_init_cp_pg_table(struct radeon_device *rdev)
{
- const __be32 *fw_data;
volatile u32 *dst_ptr;
int me, i, max_me = 4;
u32 bo_offset = 0;
- u32 table_offset;
+ u32 table_offset, table_size;
if (rdev->family == CHIP_KAVERI)
max_me = 5;
/* write the cp table buffer */
dst_ptr = rdev->rlc.cp_table_ptr;
for (me = 0; me < max_me; me++) {
- if (me == 0) {
- fw_data = (const __be32 *)rdev->ce_fw->data;
- table_offset = CP_ME_TABLE_OFFSET;
- } else if (me == 1) {
- fw_data = (const __be32 *)rdev->pfp_fw->data;
- table_offset = CP_ME_TABLE_OFFSET;
- } else if (me == 2) {
- fw_data = (const __be32 *)rdev->me_fw->data;
- table_offset = CP_ME_TABLE_OFFSET;
+ if (rdev->new_fw) {
+ const __le32 *fw_data;
+ const struct gfx_firmware_header_v1_0 *hdr;
+
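+ /* each image's header provides the offset and size of its jump table */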
+ if (me == 0) {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->ce_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 1) {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->pfp_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 2) {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->me_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 3) {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->mec_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->mec2_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ }
+
+ for (i = 0; i < table_size; i++) {
+ dst_ptr[bo_offset + i] =
+ cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+ }
+ bo_offset += table_size;
} else {
- fw_data = (const __be32 *)rdev->mec_fw->data;
- table_offset = CP_MEC_TABLE_OFFSET;
- }
+ const __be32 *fw_data;
+ table_size = CP_ME_TABLE_SIZE;
+
+ if (me == 0) {
+ fw_data = (const __be32 *)rdev->ce_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else if (me == 1) {
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else if (me == 2) {
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else {
+ fw_data = (const __be32 *)rdev->mec_fw->data;
+ table_offset = CP_MEC_TABLE_OFFSET;
+ }
- for (i = 0; i < CP_ME_TABLE_SIZE; i ++) {
- dst_ptr[bo_offset + i] = cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
+ for (i = 0; i < table_size; i++) {
+ dst_ptr[bo_offset + i] =
+ cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
+ }
+ bo_offset += table_size;
}
- bo_offset += CP_ME_TABLE_SIZE;
}
}
case 16: /* D5 page flip */
case 18: /* D6 page flip */
DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
- radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
break;
case 42: /* HPD hotplug */
switch (src_data) {
static int cik_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
+ u32 nop;
int r;
/* enable pcie gen2/3 link */
}
cik_irq_set(rdev);
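+ /* with the legacy HAWAII firmware fall back to type-2 (CP_PACKET2) nops; all other cases use type-3 nops */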
+ if (rdev->family == CHIP_HAWAII) {
+ if (rdev->new_fw)
+ nop = PACKET3(PACKET3_NOP, 0x3FFF);
+ else
+ nop = RADEON_CP_PACKET2;
+ } else {
+ nop = PACKET3(PACKET3_NOP, 0x3FFF);
+ }
+
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- PACKET3(PACKET3_NOP, 0x3FFF));
+ nop);
if (r)
return r;
/* type-2 packets are deprecated on MEC, use type-3 instead */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
- PACKET3(PACKET3_NOP, 0x3FFF));
+ nop);
if (r)
return r;
ring->me = 1; /* first MEC */
/* type-2 packets are deprecated on MEC, use type-3 instead */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
- PACKET3(PACKET3_NOP, 0x3FFF));
+ nop);
if (r)
return r;
/* dGPU only have 1 MEC */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
+#include "radeon_ucode.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "cikd.h"
reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;
WREG32(reg, (ring->wptr << 2) & 0x3fffc);
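+ /* read back to make sure the wptr write has posted */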
+ (void)RREG32(reg);
}
/**
*/
static int cik_sdma_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
int i;
if (!rdev->sdma_fw)
/* halt the MEs */
cik_sdma_enable(rdev, false);
- /* sdma0 */
- fw_data = (const __be32 *)rdev->sdma_fw->data;
- WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
- for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
- WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
- WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
-
- /* sdma1 */
- fw_data = (const __be32 *)rdev->sdma_fw->data;
- WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
- for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
- WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
- WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+ if (rdev->new_fw) {
+ const struct sdma_firmware_header_v1_0 *hdr =
+ (const struct sdma_firmware_header_v1_0 *)rdev->sdma_fw->data;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ radeon_ucode_print_sdma_hdr(&hdr->header);
+
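+ /* the same image is loaded into both SDMA engines */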
+ /* sdma0 */
+ fw_data = (const __le32 *)
+ (rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, le32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+
+ /* sdma1 */
+ fw_data = (const __le32 *)
+ (rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, le32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+ } else {
+ const __be32 *fw_data;
+
+ /* sdma0 */
+ fw_data = (const __be32 *)rdev->sdma_fw->data;
+ WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
+ for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+
+ /* sdma1 */
+ fw_data = (const __be32 *)rdev->sdma_fw->data;
+ WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
+ for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+ }
WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
}
/**
- * cik_sdma_vm_set_page - update the page tables using sDMA
+ * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using sDMA (CIK).
+ */
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count)
+{
+ while (count) {
+ unsigned bytes = count * 8;
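+ /* a single COPY packet can move at most 0x1FFFF8 bytes */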
+ if (bytes > 0x1FFFF8)
+ bytes = 0x1FFFF8;
+
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
+ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = bytes;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+ pe += bytes;
+ src += bytes;
+ count -= bytes / 8;
+ }
+}
+
+/**
+ * cik_sdma_vm_write_pages - update PTEs by writing them manually
*
* @rdev: radeon_device pointer
* @ib: indirect buffer to fill with commands
* @incr: increase next addr by incr bytes
* @flags: access flags
*
- * Update the page tables using sDMA (CIK).
+ * Update PTEs by writing them manually using sDMA (CIK).
*/
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
{
uint64_t value;
unsigned ndw;
- trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
- if (flags == R600_PTE_GART) {
- uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
- while (count) {
- unsigned bytes = count * 8;
- if (bytes > 0x1FFFF8)
- bytes = 0x1FFFF8;
-
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib->ptr[ib->length_dw++] = bytes;
- ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
- ib->ptr[ib->length_dw++] = lower_32_bits(src);
- ib->ptr[ib->length_dw++] = upper_32_bits(src);
- ib->ptr[ib->length_dw++] = lower_32_bits(pe);
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
- pe += bytes;
- src += bytes;
- count -= bytes / 8;
- }
- } else if (flags & R600_PTE_SYSTEM) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- while (count) {
- ndw = count;
- if (ndw > 0x7FFFF)
- ndw = 0x7FFFF;
-
- if (flags & R600_PTE_VALID)
+ } else if (flags & R600_PTE_VALID) {
value = addr;
- else
+ } else {
value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
+ }
+ addr += incr;
+ value |= flags;
+ ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = ndw; /* number of entries */
- pe += ndw * 8;
- addr += ndw * incr;
- count -= ndw;
}
}
+}
+
+/**
+ * cik_sdma_vm_set_pages - update the page tables using sDMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (CIK).
+ */
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint64_t value;
+ unsigned ndw;
+
+ while (count) {
+ ndw = count;
+ if (ndw > 0x7FFFF)
+ ndw = 0x7FFFF;
+
+ if (flags & R600_PTE_VALID)
+ value = addr;
+ else
+ value = 0;
+
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = ndw; /* number of entries */
+
+ pe += ndw * 8;
+ addr += ndw * incr;
+ count -= ndw;
+ }
+}
+
+/**
+ * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ */
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
+{
while (ib->length_dw & 0x7)
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}
tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
AUDIO_LIPSYNC(connector->audio_latency[1]);
else
- tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+ tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
} else {
if (connector->latency_present[0])
tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
AUDIO_LIPSYNC(connector->audio_latency[0]);
else
- tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+ tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
}
WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}
offset = dig->afmt->pin->offset;
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder)
+ if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
+ break;
+ }
}
if (!radeon_connector) {
return;
}
- sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
if (sad_count <= 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
return;
offset = dig->afmt->pin->offset;
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder)
+ if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
+ break;
+ }
}
if (!radeon_connector) {
return;
}
- sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+ sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
if (save->crtc_enabled[i]) {
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
- tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ tmp &= ~EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
/* save restore block */
if (rdev->rlc.save_restore_obj == NULL) {
r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+ &rdev->rlc.save_restore_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
return r;
if (rdev->rlc.clear_state_obj == NULL) {
r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+ &rdev->rlc.clear_state_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
sumo_rlc_fini(rdev);
if (rdev->rlc.cp_table_size) {
if (rdev->rlc.cp_table_obj == NULL) {
- r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.cp_table_obj);
+ r = radeon_bo_create(rdev, rdev->rlc.cp_table_size,
+ PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+ &rdev->rlc.cp_table_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
sumo_rlc_fini(rdev);
case 16: /* D5 page flip */
case 18: /* D6 page flip */
DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
- radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
break;
case 42: /* HPD hotplug */
switch (src_data) {
return;
}
- sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
if (sad_count <= 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
return;
return;
}
- sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+ sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
}
/**
- * cayman_dma_vm_set_page - update the page tables using the DMA
+ * cayman_dma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count)
+{
+ unsigned ndw;
+
+ while (count) {
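+ /* a single DMA_PACKET_COPY is limited to 0xFFFFE dwords */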
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+ 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+ pe += ndw * 4;
+ src += ndw * 4;
+ count -= ndw / 2;
+ }
+}
+
+/**
+ * cayman_dma_vm_write_pages - update PTEs by writing them manually
*
* @rdev: radeon_device pointer
* @ib: indirect buffer to fill with commands
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
- * @flags: hw access flags
+ * @flags: hw access flags
*
- * Update the page tables using the DMA (cayman/TN).
+ * Update PTEs by writing them manually using the DMA (cayman/TN).
*/
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
{
uint64_t value;
unsigned ndw;
- trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
- if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & R600_PTE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & R600_PTE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- if (flags & R600_PTE_VALID)
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE,
+ 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & R600_PTE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & R600_PTE_VALID) {
value = addr;
- else
+ } else {
value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
+ }
+ addr += incr;
+ value |= flags;
+ ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- pe += ndw * 4;
- addr += (ndw / 2) * incr;
- count -= ndw / 2;
}
}
+}
+
+/**
+ * cayman_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Update the page tables using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint64_t value;
+ unsigned ndw;
+
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & R600_PTE_VALID)
+ value = addr;
+ else
+ value = 0;
+
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
+}
+
+/**
+ * cayman_dma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ */
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib)
+{
while (ib->length_dw & 0x7)
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
}
{
uint32_t tmp;
- radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32(RADEON_AIC_CNTL, tmp);
}
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr)
+ uint64_t addr, uint32_t flags)
{
u32 *gtt = rdev->gart.ptr;
gtt[i] = cpu_to_le32(lower_32_bits(addr));
/* Wait until IDLE & CLEAN */
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
- radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
- RADEON_HDP_READ_BUFFER_INVALIDATE);
- radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+ r100_ring_hdp_flush(rdev, ring);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
radeon_ring_write(ring, fence->seq);
(void)RREG32(RADEON_CP_RB_WPTR);
}
+/**
+ * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
+ * @rdev: radeon device structure
+ * @ring: ring buffer struct for emitting packets
+ */
+void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+ RADEON_HDP_READ_BUFFER_INVALIDATE);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+}
+
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
return 0;
}
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
- bool always_indirect)
-{
- if (reg < rdev->rmmio_size && !always_indirect)
- return readl(((void __iomem *)rdev->rmmio) + reg);
- else {
- unsigned long flags;
- uint32_t ret;
-
- spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
- writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
- spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
-
- return ret;
- }
-}
-
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
- bool always_indirect)
-{
- if (reg < rdev->rmmio_size && !always_indirect)
- writel(v, ((void __iomem *)rdev->rmmio) + reg);
- else {
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
- writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
- spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
- }
-}
-
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
{
if (reg < rdev->rio_mem_size)
mb();
}
+#define R300_PTE_UNSNOOPED (1 << 0)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE (1 << 3)
void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr)
+ uint64_t addr, uint32_t flags)
{
void __iomem *ptr = rdev->gart.ptr;
addr = (lower_32_bits(addr) >> 8) |
- ((upper_32_bits(addr) & 0xff) << 24) |
- R300_PTE_WRITEABLE | R300_PTE_READABLE;
+ ((upper_32_bits(addr) & 0xff) << 24);
+ if (flags & RADEON_GART_PAGE_READ)
+ addr |= R300_PTE_READABLE;
+ if (flags & RADEON_GART_PAGE_WRITE)
+ addr |= R300_PTE_WRITEABLE;
+ if (!(flags & RADEON_GART_PAGE_SNOOP))
+ addr |= R300_PTE_UNSNOOPED;
 /* on x86 we want this to be CPU endian; on powerpc
 * without HW swappers it'll get swapped on the way
 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
if (rdev->vram_scratch.robj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- NULL, &rdev->vram_scratch.robj);
+ 0, NULL, &rdev->vram_scratch.robj);
if (r) {
return r;
}
if (rdev->ih.ring_obj == NULL) {
r = radeon_bo_create(rdev, rdev->ih.ring_size,
PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_DOMAIN_GTT, 0,
NULL, &rdev->ih.ring_obj);
if (r) {
DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
break;
case 9: /* D1 pflip */
DRM_DEBUG("IH: D1 flip\n");
- radeon_crtc_handle_flip(rdev, 0);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, 0);
break;
case 11: /* D2 pflip */
DRM_DEBUG("IH: D2 flip\n");
- radeon_crtc_handle_flip(rdev, 1);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, 1);
break;
case 19: /* HPD/DAC hotplug */
switch (src_data) {
}
/**
- * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
+ * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
 * @rdev: radeon device structure
- * bo: buffer object struct which userspace is waiting for idle
*
- * Some R6XX/R7XX doesn't seems to take into account HDP flush performed
- * through ring buffer, this leads to corruption in rendering, see
- * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
- * directly perform HDP flush by writing register through MMIO.
+ * Some R6XX/R7XX don't seem to take into account HDP flushes performed
+ * through the ring buffer. This leads to corruption in rendering, see
+ * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
+ * directly perform the HDP flush by writing the register through MMIO.
*/
-void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
+void r600_mmio_hdp_flush(struct radeon_device *rdev)
{
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
* rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
+#include <linux/interval_tree.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
extern int radeon_vm_size;
extern int radeon_vm_block_size;
extern int radeon_deep_color;
+extern int radeon_use_pflipirq;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
u16 *vddc, u16 *vddci,
u16 virtual_voltage_id,
u16 vbios_voltage_id);
+int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
+ u16 virtual_voltage_id,
+ u16 *voltage);
int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
u8 voltage_type,
u16 nominal_voltage,
struct atom_voltage_table *voltage_table);
bool radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
u8 voltage_type, u8 voltage_mode);
+int radeon_atom_get_svi2_info(struct radeon_device *rdev,
+ u8 voltage_type,
+ u8 *svd_gpio_id, u8 *svc_gpio_id);
void radeon_atom_update_memory_dll(struct radeon_device *rdev,
u32 mem_clock);
void radeon_atom_set_ac_timing(struct radeon_device *rdev,
struct radeon_bo_va {
/* protected by bo being reserved */
struct list_head bo_list;
- uint64_t soffset;
- uint64_t eoffset;
uint32_t flags;
- bool valid;
+ uint64_t addr;
unsigned ref_count;
/* protected by vm mutex */
- struct list_head vm_list;
+ struct interval_tree_node it;
struct list_head vm_status;
/* constant after initialization */
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
+ u32 flags;
unsigned pin_count;
void *kptr;
u32 tiling_flags;
int radeon_gem_init(struct radeon_device *rdev);
void radeon_gem_fini(struct radeon_device *rdev);
-int radeon_gem_object_create(struct radeon_device *rdev, int size,
+int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
int alignment, int initial_domain,
- bool discardable, bool kernel,
+ u32 flags, bool kernel,
struct drm_gem_object **obj);
int radeon_mode_dumb_create(struct drm_file *file_priv,
#define RADEON_GPU_PAGE_SHIFT 12
#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
+#define RADEON_GART_PAGE_DUMMY 0
+#define RADEON_GART_PAGE_VALID (1 << 0)
+#define RADEON_GART_PAGE_READ (1 << 1)
+#define RADEON_GART_PAGE_WRITE (1 << 2)
+#define RADEON_GART_PAGE_SNOOP (1 << 3)
+
struct radeon_gart {
dma_addr_t table_addr;
struct radeon_bo *robj;
int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, struct page **pagelist,
- dma_addr_t *dma_addr);
-void radeon_gart_restore(struct radeon_device *rdev);
+ dma_addr_t *dma_addr, uint32_t flags);
/*
#define R600_PTE_FRAG_64KB (4 << 7)
#define R600_PTE_FRAG_256KB (6 << 7)
-/* flags used for GART page table entries on R600+ */
-#define R600_PTE_GART ( R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED \
- | R600_PTE_READABLE | R600_PTE_WRITEABLE)
+/* flags needed to be set so we can copy directly from the GART table */
+#define R600_PTE_GART_MASK ( R600_PTE_READABLE | R600_PTE_WRITEABLE | \
+ R600_PTE_SYSTEM | R600_PTE_VALID )
struct radeon_vm_pt {
struct radeon_bo *bo;
};
struct radeon_vm {
- struct list_head va;
+ struct rb_root va;
unsigned id;
+ /* BOs moved, but not yet updated in the PT */
+ struct list_head invalidated;
+
/* BOs freed, but not yet updated in the PT */
struct list_head freed;
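
/* Illustrative sketch (assumption): with the per-VM mapping list replaced by
 * an interval tree, a mapping is keyed by its GPU page range roughly as shown
 * below. The function and the soffset/eoffset parameters are placeholders. */
static void example_vm_map(struct radeon_vm *vm, struct radeon_bo_va *bo_va,
			   uint64_t soffset, uint64_t eoffset)
{
	/* key the mapping by its GPU page range */
	bo_va->it.start = soffset >> RADEON_GPU_PAGE_SHIFT;
	bo_va->it.last = (eoffset >> RADEON_GPU_PAGE_SHIFT) - 1;
	interval_tree_insert(&bo_va->it, &vm->va);
}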
 /* command emit functions */
void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+ void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
struct radeon_semaphore *semaphore, bool emit_wait);
void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
int (*asic_reset)(struct radeon_device *rdev);
- /* ioctl hw specific callback. Some hw might want to perform special
- * operation on specific ioctl. For instance on wait idle some hw
- * might want to perform and HDP flush through MMIO as it seems that
- * some R6XX/R7XX hw doesn't take HDP flush into account if programmed
- * through ring.
- */
- void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
+ /* Flush the HDP cache via MMIO */
+ void (*mmio_hdp_flush)(struct radeon_device *rdev);
/* check if 3D engine is idle */
bool (*gui_idle)(struct radeon_device *rdev);
/* wait for mc_idle */
struct {
void (*tlb_flush)(struct radeon_device *rdev);
void (*set_page)(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
} gart;
struct {
int (*init)(struct radeon_device *rdev);
void (*fini)(struct radeon_device *rdev);
- void (*set_page)(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+ void (*copy_pages)(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count);
+ void (*write_pages)(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+ void (*set_pages)(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+ void (*pad_ib)(struct radeon_ib *ib);
} vm;
/* ring specific callbacks */
struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
const struct firmware *mc_fw; /* NI MC firmware */
const struct firmware *ce_fw; /* SI CE firmware */
const struct firmware *mec_fw; /* CIK MEC firmware */
+ const struct firmware *mec2_fw; /* KV MEC2 firmware */
const struct firmware *sdma_fw; /* CIK SDMA firmware */
const struct firmware *smc_fw; /* SMC firmware */
const struct firmware *uvd_fw; /* UVD firmware */
const struct firmware *vce_fw; /* VCE firmware */
+ bool new_fw;
struct r600_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
struct dev_pm_domain vga_pm_domain;
bool have_disp_power_ref;
+ u32 px_quirk_flags;
+
+ /* tracking pinned memory */
+ u64 vram_pin_size;
+ u64 gart_pin_size;
};
bool radeon_is_px(struct drm_device *dev);
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
- bool always_indirect);
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
- bool always_indirect);
+#define RADEON_MIN_MMIO_SIZE 0x10000
+
+static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+ bool always_indirect)
+{
+ /* The mmio size is 64kb at minimum. Allows the if to be optimized out. */
+ if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
+ return readl(((void __iomem *)rdev->rmmio) + reg);
+ else {
+ unsigned long flags;
+ uint32_t ret;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+ writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+ ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+ return ret;
+ }
+}
+
+static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+ bool always_indirect)
+{
+ if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
+ writel(v, ((void __iomem *)rdev->rmmio) + reg);
+ else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+ writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+ writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+ }
+}
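
/* For context (assumption): radeon.h typically wraps these helpers in the
 * usual register accessor macros, roughly like the illustrative pair below.
 * The EXAMPLE_ names are placeholders, not the tree's actual macro names. */
#define EXAMPLE_RREG32(reg)	r100_mm_rreg(rdev, (reg), false)
#define EXAMPLE_WREG32(reg, v)	r100_mm_wreg(rdev, (reg), (v), false)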
+
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
+#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
-#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
+#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib)))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
struct radeon_vm *vm);
int radeon_vm_clear_freed(struct radeon_device *rdev,
struct radeon_vm *vm);
+int radeon_vm_clear_invalids(struct radeon_device *rdev,
+ struct radeon_vm *vm);
int radeon_vm_bo_update(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
struct ttm_mem_reg *mem);
.get_rptr = &r100_gfx_get_rptr,
.get_wptr = &r100_gfx_get_wptr,
.set_wptr = &r100_gfx_set_wptr,
+ .hdp_flush = &r100_ring_hdp_flush,
};
static struct radeon_asic r100_asic = {
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r100_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r100_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.get_rptr = &r100_gfx_get_rptr,
.get_wptr = &r100_gfx_get_wptr,
.set_wptr = &r100_gfx_set_wptr,
+ .hdp_flush = &r100_ring_hdp_flush,
};
static struct radeon_asic r300_asic = {
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.resume = &r420_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.resume = &rs400_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs400_mc_wait_for_idle,
.gart = {
.resume = &rs600_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs600_mc_wait_for_idle,
.gart = {
.resume = &rs690_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs690_mc_wait_for_idle,
.gart = {
.resume = &rv515_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rv515_mc_wait_for_idle,
.gart = {
.resume = &r520_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r520_mc_wait_for_idle,
.gart = {
.resume = &r600_resume,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
.resume = &r600_resume,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
.resume = &r600_resume,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
.resume = &rv770_resume,
.asic_reset = &r600_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
.resume = &evergreen_resume,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
.resume = &evergreen_resume,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
.resume = &evergreen_resume,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
.resume = &cayman_resume,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .set_page = &cayman_dma_vm_set_page,
+ .copy_pages = &cayman_dma_vm_copy_pages,
+ .write_pages = &cayman_dma_vm_write_pages,
+ .set_pages = &cayman_dma_vm_set_pages,
+ .pad_ib = &cayman_dma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
.resume = &cayman_resume,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .set_page = &cayman_dma_vm_set_page,
+ .copy_pages = &cayman_dma_vm_copy_pages,
+ .write_pages = &cayman_dma_vm_write_pages,
+ .set_pages = &cayman_dma_vm_set_pages,
+ .pad_ib = &cayman_dma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
.resume = &si_resume,
.asic_reset = &si_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &si_get_xclk,
.vm = {
.init = &si_vm_init,
.fini = &si_vm_fini,
- .set_page = &si_dma_vm_set_page,
+ .copy_pages = &si_dma_vm_copy_pages,
+ .write_pages = &si_dma_vm_write_pages,
+ .set_pages = &si_dma_vm_set_pages,
+ .pad_ib = &cayman_dma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
.resume = &cik_resume,
.asic_reset = &cik_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = &r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &cik_get_xclk,
.vm = {
.init = &cik_vm_init,
.fini = &cik_vm_fini,
- .set_page = &cik_sdma_vm_set_page,
+ .copy_pages = &cik_sdma_vm_copy_pages,
+ .write_pages = &cik_sdma_vm_write_pages,
+ .set_pages = &cik_sdma_vm_set_pages,
+ .pad_ib = &cik_sdma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
.resume = &cik_resume,
.asic_reset = &cik_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = &r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &cik_get_xclk,
.vm = {
.init = &cik_vm_init,
.fini = &cik_vm_fini,
- .set_page = &cik_sdma_vm_set_page,
+ .copy_pages = &cik_sdma_vm_copy_pages,
+ .write_pages = &cik_sdma_vm_write_pages,
+ .set_pages = &cik_sdma_vm_set_pages,
+ .pad_ib = &cik_sdma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- RADEON_CG_SUPPORT_GFX_CGCG |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- RADEON_CG_SUPPORT_GFX_CGCG |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CP_LS |
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- RADEON_CG_SUPPORT_GFX_CGCG |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- RADEON_CG_SUPPORT_GFX_CGCG |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
struct radeon_ring *ring);
void r100_gfx_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
-
+void r100_ring_hdp_flush(struct radeon_device *rdev,
+ struct radeon_ring *ring);
/*
* r200,rv250,rs300,rv280
*/
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
extern int rs400_resume(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int rs400_gart_init(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
-extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
+extern void r600_mmio_hdp_flush(struct radeon_device *rdev);
extern bool r600_gui_idle(struct radeon_device *rdev);
extern void r600_pm_misc(struct radeon_device *rdev);
extern void r600_pm_init_profile(struct radeon_device *rdev);
struct radeon_ib *ib);
bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count);
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib);
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
-void si_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count);
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void si_dma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
u32 si_get_xclk(struct radeon_device *rdev);
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
int cik_vm_init(struct radeon_device *rdev);
void cik_vm_fini(struct radeon_device *rdev);
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count);
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib);
+
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
u32 cik_gfx_get_rptr(struct radeon_device *rdev,
"adm1032",
"adm1030",
"max6649",
- "lm64",
+ "lm63", /* lm64 */
"f75375",
"asc7xxx",
};
"adm1032",
"adm1030",
"max6649",
- "lm64",
+ "lm63", /* lm64 */
"f75375",
"RV6xx",
"RV770",
return 0;
}
+union get_voltage_info {
+ struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
+ struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
+};
+
+int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
+ u16 virtual_voltage_id,
+ u16 *voltage)
+{
+ int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
+ u32 entry_id;
+ u32 count = rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
+ union get_voltage_info args;
+
+ for (entry_id = 0; entry_id < count; entry_id++) {
+ if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
+ virtual_voltage_id)
+ break;
+ }
+
+ if (entry_id >= count)
+ return -EINVAL;
+
+ args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
+ args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
+ args.in.ulSCLKFreq =
+ cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ *voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
+
+ return 0;
+}
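
/* Sketch (assumption): how a dpm init path might resolve a virtual/leakage
 * voltage id to a real EVV voltage, keeping a fallback value when the id is
 * not present in the vddc-vs-sclk table. The wrapper is illustrative only. */
static u16 example_resolve_evv(struct radeon_device *rdev,
			       u16 virtual_voltage_id, u16 fallback)
{
	u16 vddc;

	if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) == 0)
		return vddc;
	return fallback;	/* id not found in the dependency table */
}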
+
int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
u16 voltage_level, u8 voltage_type,
u32 *gpio_value, u32 *gpio_mask)
return false;
}
+int radeon_atom_get_svi2_info(struct radeon_device *rdev,
+ u8 voltage_type,
+ u8 *svd_gpio_id, u8 *svc_gpio_id)
+{
+ int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+ u8 frev, crev;
+ u16 data_offset, size;
+ union voltage_object_info *voltage_info;
+ union voltage_object *voltage_object = NULL;
+
+ if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ voltage_info = (union voltage_object_info *)
+ (rdev->mode_info.atom_context->bios + data_offset);
+
+ switch (frev) {
+ case 3:
+ switch (crev) {
+ case 1:
+ voltage_object = (union voltage_object *)
+ atom_lookup_voltage_object_v3(&voltage_info->v3,
+ voltage_type,
+ VOLTAGE_OBJ_SVID2);
+ if (voltage_object) {
+ *svd_gpio_id = voltage_object->v3.asSVID2Obj.ucSVDGpioId;
+ *svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId;
+ } else {
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("unknown voltage object table\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("unknown voltage object table\n");
+ return -EINVAL;
+ }
+
+ }
+ return 0;
+}
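
/* Sketch (assumption): how a dpm setup routine might use the new SVI2 helper
 * to fetch the SVD/SVC gpio ids before programming the voltage controller.
 * The wrapper function is illustrative, not part of the patch. */
static int example_setup_svi2(struct radeon_device *rdev)
{
	u8 svd_gpio_id, svc_gpio_id;
	int r;

	r = radeon_atom_get_svi2_info(rdev, VOLTAGE_TYPE_VDDC,
				      &svd_gpio_id, &svc_gpio_id);
	if (r)
		return r;	/* board has no SVI2 voltage object */

	/* ... program the SMC / voltage controller with the gpio ids ... */
	return 0;
}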
+
int radeon_atom_get_max_voltage(struct radeon_device *rdev,
u8 voltage_type, u16 *max_voltage)
{
int time;
n = RADEON_BENCHMARK_ITERATIONS;
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
if (r) {
goto out_cleanup;
}
if (r) {
goto out_cleanup;
}
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
if (r) {
goto out_cleanup;
}
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
- drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
break;
}
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
/* hdmi deep color only implemented on DCE4+ */
if ((bpc > 8) && !ASIC_IS_DCE4(rdev)) {
DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 8 bpc.\n",
connector->name, bpc);
}
}
+ else if (bpc > 8) {
+ /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
+ DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
+ connector->name);
+ bpc = 8;
+ }
}
- if ((radeon_deep_color == 0) && (bpc > 8))
+ if ((radeon_deep_color == 0) && (bpc > 8)) {
+ DRM_DEBUG("%s: Deep color disabled. Set radeon module param deep_color=1 to enable.\n",
+ connector->name);
bpc = 8;
+ }
DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
connector->name, connector->display_info.bpc, bpc);
return NULL;
}
+struct edid *radeon_connector_edid(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_property_blob *edid_blob = connector->edid_blob_ptr;
+
+ if (radeon_connector->edid) {
+ return radeon_connector->edid;
+ } else if (edid_blob) {
+ struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL);
+ if (edid)
+ radeon_connector->edid = edid;
+ }
+ return radeon_connector->edid;
+}
+
+static void radeon_connector_get_edid(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_connector->edid)
+ return;
+
+ /* on hw with routers, select right port */
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
+
+ if ((radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+ ENCODER_OBJECT_ID_NONE) &&
+ radeon_connector->ddc_bus->has_aux) {
+ radeon_connector->edid = drm_get_edid(connector,
+ &radeon_connector->ddc_bus->aux.ddc);
+ } else if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+ if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
+ dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
+ radeon_connector->ddc_bus->has_aux)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->aux.ddc);
+ else if (radeon_connector->ddc_bus)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
+ } else if (radeon_connector->ddc_bus) {
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
+ }
+
+ if (!radeon_connector->edid) {
+ if (rdev->is_atom_bios) {
+ /* some laptops provide a hardcoded edid in rom for LCDs */
+ if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)))
+ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ } else {
+ /* some servers provide a hardcoded edid in rom for KVMs */
+ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ }
+ }
+}
+
+static void radeon_connector_free_edid(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_connector->edid) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ }
+}
+
+static int radeon_ddc_get_modes(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int ret;
+
+ if (radeon_connector->edid) {
+ drm_mode_connector_update_edid_property(connector, radeon_connector->edid);
+ ret = drm_add_edid_modes(connector, radeon_connector->edid);
+ drm_edid_to_eld(connector, radeon_connector->edid);
+ return ret;
+ }
+ drm_mode_connector_update_edid_property(connector, NULL);
+ return 0;
+}
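
/* Sketch (assumption): the pattern the converted detect() callbacks follow
 * with the new helpers - drop any stale cached EDID, re-fetch it, then base
 * the connection status on whether an EDID was found. Simplified for
 * illustration; real detect() paths also consult HPD and load detection. */
static enum drm_connector_status example_detect(struct drm_connector *connector)
{
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);

	radeon_connector_free_edid(connector);
	radeon_connector_get_edid(connector);

	return radeon_connector->edid ? connector_status_connected
				      : connector_status_disconnected;
}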
+
static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
return NULL;
}
+static void radeon_get_native_mode(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+ struct radeon_encoder *radeon_encoder;
+
+ if (encoder == NULL)
+ return;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (!list_empty(&connector->probed_modes)) {
+ struct drm_display_mode *preferred_mode =
+ list_first_entry(&connector->probed_modes,
+ struct drm_display_mode, head);
+
+ radeon_encoder->native_mode = *preferred_mode;
+ } else {
+ radeon_encoder->native_mode.clock = 0;
+ }
+}
+
/*
* radeon_connector_analog_encoder_conflict_solve
* - search for other connectors sharing this encoder
radeon_property_change_mode(&radeon_encoder->base);
}
+ if (property == dev->mode_config.scaling_mode_property) {
+ enum radeon_rmx_type rmx_type;
+
+ if (connector->encoder)
+ radeon_encoder = to_radeon_encoder(connector->encoder);
+ else {
+ struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+ radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
+ }
+
+ switch (val) {
+ default:
+ case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
+ case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
+ case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
+ case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
+ }
+ if (radeon_encoder->rmx_type == rmx_type)
+ return 0;
+
+ if ((rmx_type != RMX_OFF) &&
+ (radeon_encoder->native_mode.clock == 0))
+ return 0;
+
+ radeon_encoder->rmx_type = rmx_type;
+
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+
return 0;
}
static int radeon_lvds_get_modes(struct drm_connector *connector)
{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
int ret = 0;
struct drm_display_mode *mode;
- if (radeon_connector->ddc_bus) {
- ret = radeon_ddc_get_modes(radeon_connector);
- if (ret > 0) {
- encoder = radeon_best_single_encoder(connector);
- if (encoder) {
- radeon_fixup_lvds_native_mode(encoder, connector);
- /* add scaled modes */
- radeon_add_common_modes(encoder, connector);
- }
- return ret;
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
+ if (ret > 0) {
+ encoder = radeon_best_single_encoder(connector);
+ if (encoder) {
+ radeon_fixup_lvds_native_mode(encoder, connector);
+ /* add scaled modes */
+ radeon_add_common_modes(encoder, connector);
}
+ return ret;
}
encoder = radeon_best_single_encoder(connector);
}
/* check for edid as well */
+ radeon_connector_get_edid(connector);
if (radeon_connector->edid)
ret = connector_status_connected;
- else {
- if (radeon_connector->ddc_bus) {
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->adapter);
- if (radeon_connector->edid)
- ret = connector_status_connected;
- }
- }
/* check acpi lid status ??? */
radeon_connector_update_scratch_regs(connector, ret);
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- if (radeon_connector->edid)
- kfree(radeon_connector->edid);
+ radeon_connector_free_edid(connector);
kfree(radeon_connector->con_priv);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
static int radeon_vga_get_modes(struct drm_connector *connector)
{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret;
- ret = radeon_ddc_get_modes(radeon_connector);
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
+
+ radeon_get_native_mode(connector);
return ret;
}
dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
- if (radeon_connector->edid) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
- }
- radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+ radeon_connector_free_edid(connector);
+ radeon_connector_get_edid(connector);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
connector->name);
ret = connector_status_connected;
} else {
- radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+ radeon_connector->use_digital =
+ !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if (radeon_connector->use_digital && radeon_connector->shared_ddc) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
+ radeon_connector_free_edid(connector);
ret = connector_status_disconnected;
- } else
+ } else {
ret = connector_status_connected;
+ }
}
} else {
.set_property = radeon_connector_set_property,
};
-static int radeon_dvi_get_modes(struct drm_connector *connector)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- int ret;
-
- ret = radeon_ddc_get_modes(radeon_connector);
- return ret;
-}
-
static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
- if (radeon_connector->edid) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
- }
- radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+ radeon_connector_free_edid(connector);
+ radeon_connector_get_edid(connector);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
connector->name);
 /* rs690 seems to have a problem with connectors not existing and always
 * returning a block of 0's. If we see this, just stop polling on this output */
- if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
+ if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) &&
+ radeon_connector->base.null_edid_counter) {
ret = connector_status_disconnected;
DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n",
connector->name);
broken_edid = true; /* defer use_digital to later */
}
} else {
- radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+ radeon_connector->use_digital =
+ !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if ((!radeon_connector->use_digital) && radeon_connector->shared_ddc) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
+ radeon_connector_free_edid(connector);
ret = connector_status_disconnected;
- } else
+ } else {
ret = connector_status_connected;
-
+ }
/* This gets complicated. We have boards with VGA + HDMI with a
* shared DDC line and we have boards with DVI-D + HDMI with a shared
* DDC line. The latter is more complex because with DVI<->HDMI adapters
if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
/* hpd is our only option in this case */
if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
+ radeon_connector_free_edid(connector);
ret = connector_status_disconnected;
}
}
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
return MODE_OK;
- else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
/* HDMI 1.3+ supports max clock of 340 Mhz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
}
static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
- .get_modes = radeon_dvi_get_modes,
+ .get_modes = radeon_vga_get_modes,
.mode_valid = radeon_dvi_mode_valid,
.best_encoder = radeon_dvi_encoder,
};
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
- ret = radeon_ddc_get_modes(radeon_connector);
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
- ret = radeon_ddc_get_modes(radeon_connector);
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
}
if (ret > 0) {
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
- ret = radeon_ddc_get_modes(radeon_connector);
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
+
+ radeon_get_native_mode(connector);
}
return ret;
return ENCODER_OBJECT_ID_NONE;
}
-bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
+static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
{
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
goto out;
}
- if (radeon_connector->edid) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
- }
+ radeon_connector_free_edid(connector);
if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
(radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
return radeon_dp_mode_valid_helper(connector, mode);
} else {
- if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
/* HDMI 1.3+ supports max clock of 340 Mhz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
0);
drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
+
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
+ if (ASIC_IS_AVIVO(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
+ if (ASIC_IS_AVIVO(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->interlace_allowed = true;
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
}
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
- if (ASIC_IS_AVIVO(rdev)) {
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.dither_property,
- RADEON_FMT_DITHER_DISABLE);
- }
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
}
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
- if (ASIC_IS_AVIVO(rdev)) {
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.dither_property,
- RADEON_FMT_DITHER_DISABLE);
- }
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
}
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
- if (ASIC_IS_AVIVO(rdev)) {
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.dither_property,
- RADEON_FMT_DITHER_DISABLE);
-
- }
connector->interlace_allowed = true;
/* in theory with a DP to VGA converter... */
connector->doublescan_allowed = false;
if (r)
return r;
}
- return 0;
+
+ return radeon_vm_clear_invalids(rdev, vm);
}
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
"LAST",
};
+#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
+#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
+
+struct radeon_px_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+ u32 px_quirk_flags;
+};
+
+static struct radeon_px_quirk radeon_px_quirk_list[] = {
+ /* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
+ * https://bugzilla.kernel.org/show_bug.cgi?id=74551
+ */
+ { PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
+ /* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
+ * https://bugzilla.kernel.org/show_bug.cgi?id=51381
+ */
+ { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
+ /* macbook pro 8.2 */
+ { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
+ { 0, 0, 0, 0, 0 },
+};
+
bool radeon_is_px(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
return false;
}
+static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
+{
+ struct radeon_px_quirk *p = radeon_px_quirk_list;
+
+ /* Apply PX quirks */
+ while (p && p->chip_device != 0) {
+ if (rdev->pdev->vendor == p->chip_vendor &&
+ rdev->pdev->device == p->chip_device &&
+ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+ rdev->pdev->subsystem_device == p->subsys_device) {
+ rdev->px_quirk_flags = p->px_quirk_flags;
+ break;
+ }
+ ++p;
+ }
+
+ if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
+ rdev->flags &= ~RADEON_IS_PX;
+}
+
/**
* radeon_program_register_sequence - program an array of registers.
*
if (rdev->wb.wb_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
+ RADEON_GEM_DOMAIN_GTT, 0, NULL,
+ &rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r;
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */
- if (radeon_vm_block_size < 9) {
+ if (radeon_vm_block_size == -1) {
+
+ /* Total bits covered by PD + PTs */
+ unsigned bits = ilog2(radeon_vm_size) + 18;
+
+ /* Make sure the PD is 4K in size up to 8GB address space.
+ Above that, split equally between PD and PTs */
+ if (radeon_vm_size <= 8)
+ radeon_vm_block_size = bits - 9;
+ else
+ radeon_vm_block_size = (bits + 3) / 2;
+
+ } else if (radeon_vm_block_size < 9) {
dev_warn(rdev->dev, "VM page table size (%d) too small\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
}
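/* Worked example (for illustration): with the new 8GB default vm_size,
 * bits = ilog2(8) + 18 = 21, and since vm_size <= 8 the block size becomes
 * 21 - 9 = 12, i.e. 4096 PTEs per page table with a 512-entry (4KB) page
 * directory covering the whole address space. */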
/**
- * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
- * needed for waking up.
- *
- * @pdev: pci dev pointer
- */
-static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
-{
-
- /* 6600m in a macbook pro */
- if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
- pdev->subsystem_device == 0x00e2) {
- printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
- return true;
- }
-
- return false;
-}
-
-/**
* radeon_switcheroo_set_state - set switcheroo state
*
* @pdev: pci dev pointer
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ struct radeon_device *rdev = dev->dev_private;
if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
return;
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
+ if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
dev->pdev->d3_delay = 20;
radeon_resume_kms(dev, true, true);
if (rdev->rio_mem == NULL)
DRM_ERROR("Unable to find PCI I/O BAR\n");
+ if (rdev->flags & RADEON_IS_PX)
+ radeon_device_handle_px_quirks(rdev);
+
/* if we have > 1 VGA cards, then disable the radeon VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
if (radeon_crtc == NULL)
return;
+ /* Skip the pageflip completion check below (based on polling) on
+ * asics which reliably support hw pageflip completion irqs. Pflip
+ * irqs are a reliable and race-free method of handling pageflip
+ * completion detection. A use_pflipirq module parameter < 2 allows
+ * this to be overridden in case of asics with faulty pflip irqs.
+ * A module parameter of 0 would only use this polling based path,
+ * a parameter of 1 would use pflip irq only as a backup to this
+ * path, as in Linux 3.16.
+ */
+ if ((radeon_use_pflipirq == 2) && ASIC_IS_DCE4(rdev))
+ return;
+
spin_lock_irqsave(&rdev->ddev->event_lock, flags);
if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
return ret;
}
-int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
-{
- struct drm_device *dev = radeon_connector->base.dev;
- struct radeon_device *rdev = dev->dev_private;
- int ret = 0;
-
- /* don't leak the edid if we already fetched it in detect() */
- if (radeon_connector->edid)
- goto got_edid;
-
- /* on hw with routers, select right port */
- if (radeon_connector->router.ddc_valid)
- radeon_router_select_ddc_port(radeon_connector);
-
- if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
- ENCODER_OBJECT_ID_NONE) {
- if (radeon_connector->ddc_bus->has_aux)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->aux.ddc);
- } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
- (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
- struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
-
- if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
- dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
- radeon_connector->ddc_bus->has_aux)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->aux.ddc);
- else if (radeon_connector->ddc_bus && !radeon_connector->edid)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->adapter);
- } else {
- if (radeon_connector->ddc_bus && !radeon_connector->edid)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->adapter);
- }
-
- if (!radeon_connector->edid) {
- if (rdev->is_atom_bios) {
- /* some laptops provide a hardcoded edid in rom for LCDs */
- if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
- (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
- radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
- } else
- /* some servers provide a hardcoded edid in rom for KVMs */
- radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
- }
- if (radeon_connector->edid) {
-got_edid:
- drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
- ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
- drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
- return ret;
- }
- drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
- return 0;
-}
-
/* avivo */
/**
(!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
- drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
is_hdtv_mode(mode)))) {
if (radeon_encoder->underscan_hborder != 0)
radeon_crtc->h_border = radeon_encoder->underscan_hborder;
* 2.38.0 - RADEON_GEM_OP (GET_INITIAL_DOMAIN, SET_INITIAL_DOMAIN),
* CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
* 2.39.0 - Add INFO query for number of active CUs
+ * 2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
+ * CS to GPU
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 39
+#define KMS_DRIVER_MINOR 40
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
int radeon_aspm = -1;
int radeon_runtime_pm = -1;
int radeon_hard_reset = 0;
-int radeon_vm_size = 4;
-int radeon_vm_block_size = 9;
+int radeon_vm_size = 8;
+int radeon_vm_block_size = -1;
int radeon_deep_color = 0;
+int radeon_use_pflipirq = 2;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
-MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
+MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 8GB)");
module_param_named(vm_size, radeon_vm_size, int, 0444);
-MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
+MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
module_param_named(vm_block_size, radeon_vm_block_size, int, 0444);
MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
module_param_named(deep_color, radeon_deep_color, int, 0444);
+MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))");
+module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444);
+
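/* All of these are read-only module parameters (mode 0444), so they can
 * only be set at load time. A minimal sketch of overriding the defaults,
 * assuming radeon is built as a module, e.g. in /etc/modprobe.d/radeon.conf:
 *
 *	options radeon vm_size=8 vm_block_size=-1 use_pflipirq=2
 */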
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
/* HDMI 1.3 supports up to 340 Mhz over single link */
- if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (pixel_clock > 340000)
return true;
else
return false;
else {
/* HDMI 1.3 supports up to 340 Mhz over single link */
- if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (pixel_clock > 340000)
return true;
else
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
- false, true,
- &gobj);
+ 0, true, &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
aligned_size);
if (rdev->gart.robj == NULL) {
r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- NULL, &rdev->gart.robj);
+ 0, NULL, &rdev->gart.robj);
if (r) {
return r;
}
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
if (rdev->gart.ptr) {
- radeon_gart_set_page(rdev, t, page_base);
+ radeon_gart_set_page(rdev, t, page_base,
+ RADEON_GART_PAGE_DUMMY);
}
page_base += RADEON_GPU_PAGE_SIZE;
}
* @pages: number of pages to bind
* @pagelist: pages to bind
* @dma_addr: DMA addresses of pages
+ * @flags: RADEON_GART_PAGE_* flags
*
* Binds the requested pages to the gart page table
* (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
- int pages, struct page **pagelist, dma_addr_t *dma_addr)
+ int pages, struct page **pagelist, dma_addr_t *dma_addr,
+ uint32_t flags)
{
unsigned t;
unsigned p;
if (rdev->gart.ptr) {
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, page_base);
+ radeon_gart_set_page(rdev, t, page_base, flags);
page_base += RADEON_GPU_PAGE_SIZE;
}
}
}
/**
- * radeon_gart_restore - bind all pages in the gart page table
- *
- * @rdev: radeon_device pointer
- *
- * Binds all pages in the gart page table (all asics).
- * Used to rebuild the gart table on device startup or resume.
- */
-void radeon_gart_restore(struct radeon_device *rdev)
-{
- int i, j, t;
- u64 page_base;
-
- if (!rdev->gart.ptr) {
- return;
- }
- for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
- page_base = rdev->gart.pages_addr[i];
- for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, page_base);
- page_base += RADEON_GPU_PAGE_SIZE;
- }
- }
- mb();
- radeon_gart_tlb_flush(rdev);
-}
-
-/**
* radeon_gart_init - init the driver info for managing the gart
*
* @rdev: radeon_device pointer
}
}
-int radeon_gem_object_create(struct radeon_device *rdev, int size,
+int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
int alignment, int initial_domain,
- bool discardable, bool kernel,
+ u32 flags, bool kernel,
struct drm_gem_object **obj)
{
struct radeon_bo *robj;
alignment = PAGE_SIZE;
}
- /* maximun bo size is the minimun btw visible vram and gtt size */
- max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+ /* Maximum bo size is the unpinned gtt size since we use the gtt to
+ * handle vram to system pool migrations.
+ */
+ max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
if (size > max_size) {
- printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
- __func__, __LINE__, size >> 20, max_size >> 20);
+ DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
+ size >> 20, max_size >> 20);
return -ENOMEM;
}
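/* A worked example of the check above: with a 2048MB GTT and 64MB
 * currently pinned (ring buffers, GART table, etc.), max_size is about
 * 1984MB, so a single 2GB request is rejected here with -ENOMEM instead
 * of failing later during migration. */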
retry:
- r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
+ r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
+ flags, NULL, &robj);
if (r) {
if (r != -ERESTARTSYS) {
if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
initial_domain |= RADEON_GEM_DOMAIN_GTT;
goto retry;
}
- DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
+ DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
size, initial_domain, alignment, r);
}
return r;
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data;
struct ttm_mem_type_manager *man;
- unsigned i;
man = &rdev->mman.bdev.man[TTM_PL_VRAM];
args->vram_size = rdev->mc.real_vram_size;
args->vram_visible = (u64)man->size << PAGE_SHIFT;
- if (rdev->stollen_vga_memory)
- args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
- args->vram_visible -= radeon_fbdev_total_size(rdev);
- args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
- for(i = 0; i < RADEON_NUM_RINGS; ++i)
- args->gart_size -= rdev->ring[i].ring_size;
+ args->vram_visible -= rdev->vram_pin_size;
+ args->gart_size = rdev->mc.gtt_size;
+ args->gart_size -= rdev->gart_pin_size;
+
return 0;
}
/* create a gem object to contain this object in */
args->size = roundup(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, args->alignment,
- args->initial_domain, false,
- false, &gobj);
+ args->initial_domain, args->flags,
+ false, &gobj);
if (r) {
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
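/* From userspace the new per-BO flags ride in on this same GEM create
 * ioctl. A minimal sketch, assuming libdrm's drmCommandWriteRead() and
 * the uapi struct drm_radeon_gem_create (values are illustrative):
 *
 *	struct drm_radeon_gem_create args = {
 *		.size = 1024 * 1024,
 *		.alignment = 4096,
 *		.initial_domain = RADEON_GEM_DOMAIN_GTT,
 *		.flags = RADEON_GEM_GTT_WC,
 *	};
 *	int r = drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
 *				    &args, sizeof(args));
 *	on success, args.handle holds the new BO handle
 */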
struct drm_gem_object *gobj;
struct radeon_bo *robj;
int r;
+ uint32_t cur_placement = 0;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
- r = radeon_bo_wait(robj, NULL, false);
- /* callback hw specific functions if any */
- if (rdev->asic->ioctl_wait_idle)
- robj->rdev->asic->ioctl_wait_idle(rdev, robj);
+ r = radeon_bo_wait(robj, &cur_placement, false);
+ /* Flush HDP cache via MMIO if necessary */
+ if (rdev->asic->mmio_hdp_flush &&
+ radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
+ robj->rdev->asic->mmio_hdp_flush(rdev);
drm_gem_object_unreference_unlocked(gobj);
r = radeon_gem_handle_lockup(rdev, r);
return r;
args->operation = RADEON_VA_RESULT_ERROR;
return -EINVAL;
}
- if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
- dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n");
- args->operation = RADEON_VA_RESULT_ERROR;
- return -EINVAL;
- }
switch (args->operation) {
case RADEON_VA_MAP:
switch (args->operation) {
case RADEON_VA_MAP:
- if (bo_va->soffset) {
+ if (bo_va->it.start) {
args->operation = RADEON_VA_RESULT_VA_EXIST;
- args->offset = bo_va->soffset;
+ args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
goto out;
}
r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
args->size = ALIGN(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, 0,
- RADEON_GEM_DOMAIN_VRAM,
- false, ttm_bo_type_device,
- &gobj);
+ RADEON_GEM_DOMAIN_VRAM, 0,
+ false, &gobj);
if (r)
return -ENOMEM;
--- /dev/null
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ * Christian König
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+
+/*
+ * IB
+ * IBs (Indirect Buffers) are areas of GPU accessible memory where
+ * commands are stored. You can put a pointer to the IB in the
+ * command ring and the hw will fetch the commands from the IB
+ * and execute them. Generally userspace acceleration drivers
+ * produce command buffers which are sent to the kernel and
+ * put in IBs for execution by the requested ring.
+ */
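/* A minimal sketch of that lifecycle, using only the helpers defined in
 * this file (error handling trimmed):
 *
 *	struct radeon_ib ib;
 *	int r;
 *
 *	r = radeon_ib_get(rdev, ring, &ib, NULL, 64 * 1024);
 *	... write packets into ib.ptr and set ib.length_dw ...
 *	r = radeon_ib_schedule(rdev, &ib, NULL);
 *	radeon_ib_free(rdev, &ib);
 */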
+static int radeon_debugfs_sa_init(struct radeon_device *rdev);
+
+/**
+ * radeon_ib_get - request an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the IB is associated with
+ * @ib: IB object returned
+ * @size: requested IB size
+ *
+ * Request an IB (all asics). IBs are allocated using the
+ * suballocator.
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+ struct radeon_ib *ib, struct radeon_vm *vm,
+ unsigned size)
+{
+ int r;
+
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
+ if (r) {
+ dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
+ return r;
+ }
+
+ r = radeon_semaphore_create(rdev, &ib->semaphore);
+ if (r) {
+ return r;
+ }
+
+ ib->ring = ring;
+ ib->fence = NULL;
+ ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
+ ib->vm = vm;
+ if (vm) {
+ /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
+ * space and soffset is the offset inside the pool bo
+ */
+ ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
+ } else {
+ ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
+ }
+ ib->is_const_ib = false;
+
+ return 0;
+}
+
+/**
+ * radeon_ib_free - free an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to free
+ *
+ * Free an IB (all asics).
+ */
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
+ radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
+ radeon_fence_unref(&ib->fence);
+}
+
+/**
+ * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ * @const_ib: Const IB to schedule (SI only)
+ *
+ * Schedule an IB on the associated ring (all asics).
+ * Returns 0 on success, error on failure.
+ *
+ * On SI, there are two parallel engines fed from the primary ring,
+ * the CE (Constant Engine) and the DE (Drawing Engine). Since
+ * resource descriptors have moved to memory, the CE allows you to
+ * prime the caches while the DE is updating register state so that
+ * the resource descriptors will be already in cache when the draw is
+ * processed. To accomplish this, the userspace driver submits two
+ * IBs, one for the CE and one for the DE. If there is a CE IB (called
+ * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
+ * to SI there was just a DE IB.
+ */
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+ struct radeon_ib *const_ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+ int r = 0;
+
+ if (!ib->length_dw || !ring->ready) {
+ /* TODO: Nothing in the IB that we should report. */
+ dev_err(rdev->dev, "couldn't schedule ib\n");
+ return -EINVAL;
+ }
+
+ /* 64 dwords should be enough for fence too */
+ r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
+ if (r) {
+ dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
+ return r;
+ }
+
+ /* grab a vm id if necessary */
+ if (ib->vm) {
+ struct radeon_fence *vm_id_fence;
+ vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
+ radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
+ }
+
+ /* sync with other rings */
+ r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ if (ib->vm)
+ radeon_vm_flush(rdev, ib->vm, ib->ring);
+
+ if (const_ib) {
+ radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
+ radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
+ }
+ radeon_ring_ib_execute(rdev, ib->ring, ib);
+ r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+ if (const_ib) {
+ const_ib->fence = radeon_fence_ref(ib->fence);
+ }
+
+ if (ib->vm)
+ radeon_vm_fence(rdev, ib->vm, ib->fence);
+
+ radeon_ring_unlock_commit(rdev, ring);
+ return 0;
+}
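/* For the SI CE/DE split described above, the CS ioctl path ends up
 * submitting both IBs in a single call, roughly (a sketch, assuming a
 * parser that filled ib and const_ib from the userspace chunks):
 *
 *	if (parser->const_ib.length_dw)
 *		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
 *	else
 *		r = radeon_ib_schedule(rdev, &parser->ib, NULL);
 */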
+
+/**
+ * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the suballocator to manage a pool of memory
+ * for use as IBs (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_pool_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->ib_pool_ready) {
+ return 0;
+ }
+
+ if (rdev->family >= CHIP_BONAIRE) {
+ r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
+ RADEON_IB_POOL_SIZE*64*1024,
+ RADEON_GPU_PAGE_SIZE,
+ RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_GTT_WC);
+ } else {
+ /* Before CIK, it's better to stick to cacheable GTT due
+ * to the command stream checking
+ */
+ r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
+ RADEON_IB_POOL_SIZE*64*1024,
+ RADEON_GPU_PAGE_SIZE,
+ RADEON_GEM_DOMAIN_GTT, 0);
+ }
+ if (r) {
+ return r;
+ }
+
+ r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
+ if (r) {
+ return r;
+ }
+
+ rdev->ib_pool_ready = true;
+ if (radeon_debugfs_sa_init(rdev)) {
+ dev_err(rdev->dev, "failed to register debugfs file for SA\n");
+ }
+ return 0;
+}
+
+/**
+ * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the suballocator managing the pool of memory
+ * for use as IBs (all asics).
+ */
+void radeon_ib_pool_fini(struct radeon_device *rdev)
+{
+ if (rdev->ib_pool_ready) {
+ radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
+ radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
+ rdev->ib_pool_ready = false;
+ }
+}
+
+/**
+ * radeon_ib_ring_tests - test IBs on the rings
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Test an IB (Indirect Buffer) on each ring.
+ * If the test fails, disable the ring.
+ * Returns 0 on success, error if the primary GFX ring
+ * IB test fails.
+ */
+int radeon_ib_ring_tests(struct radeon_device *rdev)
+{
+ unsigned i;
+ int r;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_ring *ring = &rdev->ring[i];
+
+ if (!ring->ready)
+ continue;
+
+ r = radeon_ib_test(rdev, i, ring);
+ if (r) {
+ ring->ready = false;
+ rdev->needs_reset = false;
+
+ if (i == RADEON_RING_TYPE_GFX_INDEX) {
+ /* oh, oh, that's really bad */
+ DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
+ rdev->accel_working = false;
+ return r;
+
+ } else {
+ /* still not good, but we can live with it */
+ DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
+
+ return 0;
+
+}
+
+static struct drm_info_list radeon_debugfs_sa_list[] = {
+ {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
+};
+
+#endif
+
+static int radeon_debugfs_sa_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
+#else
+ return 0;
+#endif
+}
}
break;
case RADEON_INFO_ACCEL_WORKING2:
- *value = rdev->accel_working;
+ if (rdev->family == CHIP_HAWAII) {
+ if (rdev->accel_working) {
+ if (rdev->new_fw)
+ *value = 3;
+ else
+ *value = 2;
+ } else {
+ *value = 0;
+ }
+ } else {
+ *value = rdev->accel_working;
+ }
break;
case RADEON_INFO_TILING_CONFIG:
if (rdev->family >= CHIP_BONAIRE)
extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
-extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
extern int radeon_get_monitor_bpc(struct drm_connector *connector);
+extern struct edid *radeon_connector_edid(struct drm_connector *connector);
+
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct drm_display_mode *mode);
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
-extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
* function are calling it.
*/
-static void radeon_bo_clear_va(struct radeon_bo *bo)
-{
- struct radeon_bo_va *bo_va, *tmp;
-
- list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
- /* remove from all vm address space */
- radeon_vm_bo_rmv(bo->rdev, bo_va);
- }
-}
-
static void radeon_update_memory_usage(struct radeon_bo *bo,
unsigned mem_type, int sign)
{
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
- radeon_bo_clear_va(bo);
+ WARN_ON(!list_empty(&bo->va));
drm_gem_object_release(&bo->gem_base);
kfree(bo);
}
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
if (domain & RADEON_GEM_DOMAIN_GTT) {
- if (rbo->rdev->flags & RADEON_IS_AGP) {
- rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+ if (rbo->flags & RADEON_GEM_GTT_UC) {
+ rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT;
+ } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
+ (rbo->rdev->flags & RADEON_IS_AGP)) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_TT;
} else {
rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
}
}
if (domain & RADEON_GEM_DOMAIN_CPU) {
- if (rbo->rdev->flags & RADEON_IS_AGP) {
- rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
+ if (rbo->flags & RADEON_GEM_GTT_UC) {
+ rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM;
+ } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
+ rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_SYSTEM;
} else {
rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
}
int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel, u32 domain,
- struct sg_table *sg, struct radeon_bo **bo_ptr)
+ u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
{
struct radeon_bo *bo;
enum ttm_bo_type type;
bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
RADEON_GEM_DOMAIN_GTT |
RADEON_GEM_DOMAIN_CPU);
+
+ bo->flags = flags;
+ /* PCI GART is always snooped */
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
down_read(&rdev->pm.mclk_lock);
ttm_bo_kunmap(&bo->kmap);
}
+struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
+{
+ if (bo == NULL)
+ return NULL;
+
+ ttm_bo_reference(&bo->tbo);
+ return bo;
+}
+
void radeon_bo_unref(struct radeon_bo **bo)
{
struct ttm_buffer_object *tbo;
return;
rdev = (*bo)->rdev;
tbo = &((*bo)->tbo);
- down_read(&rdev->pm.mclk_lock);
ttm_bo_unref(&tbo);
- up_read(&rdev->pm.mclk_lock);
if (tbo == NULL)
*bo = NULL;
}
bo->pin_count = 1;
if (gpu_addr != NULL)
*gpu_addr = radeon_bo_gpu_offset(bo);
- }
- if (unlikely(r != 0))
+ if (domain == RADEON_GEM_DOMAIN_VRAM)
+ bo->rdev->vram_pin_size += radeon_bo_size(bo);
+ else
+ bo->rdev->gart_pin_size += radeon_bo_size(bo);
+ } else {
dev_err(bo->rdev->dev, "%p pin failed\n", bo);
+ }
return r;
}
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (unlikely(r != 0))
+ if (likely(r == 0)) {
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ bo->rdev->vram_pin_size -= radeon_bo_size(bo);
+ else
+ bo->rdev->gart_pin_size -= radeon_bo_size(bo);
+ } else {
dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
+ }
return r;
}
extern int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align,
- bool kernel, u32 domain,
+ bool kernel, u32 domain, u32 flags,
struct sg_table *sg,
struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
+extern struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- unsigned size, u32 align, u32 domain);
+ unsigned size, u32 align, u32 domain,
+ u32 flags);
extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
case CHIP_RS780:
case CHIP_RS880:
case CHIP_RV770:
- case CHIP_BARTS:
- case CHIP_TURKS:
- case CHIP_CAICOS:
- case CHIP_CAYMAN:
/* DPM requires the RLC, RV770+ dGPU requires SMC */
if (!rdev->rlc_fw)
rdev->pm.pm_method = PM_METHOD_PROFILE;
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
+ case CHIP_BARTS:
+ case CHIP_TURKS:
+ case CHIP_CAICOS:
+ case CHIP_CAYMAN:
case CHIP_ARUBA:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
}
radeon_hwmon_fini(rdev);
-
- if (rdev->pm.power_state)
- kfree(rdev->pm.power_state);
+ kfree(rdev->pm.power_state);
}
static void radeon_pm_fini_dpm(struct radeon_device *rdev)
radeon_dpm_fini(rdev);
radeon_hwmon_fini(rdev);
-
- if (rdev->pm.power_state)
- kfree(rdev->pm.power_state);
+ kfree(rdev->pm.power_state);
}
void radeon_pm_fini(struct radeon_device *rdev)
int ret;
ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
- RADEON_GEM_DOMAIN_GTT, sg, &bo);
+ RADEON_GEM_DOMAIN_GTT, 0, sg, &bo);
if (ret)
return ERR_PTR(ret);
* Jerome Glisse
* Christian König
*/
-#include <linux/seq_file.h>
-#include <linux/slab.h>
#include <drm/drmP.h>
-#include <drm/radeon_drm.h>
-#include "radeon_reg.h"
#include "radeon.h"
-#include "atom.h"
-
-/*
- * IB
- * IBs (Indirect Buffers) and areas of GPU accessible memory where
- * commands are stored. You can put a pointer to the IB in the
- * command ring and the hw will fetch the commands from the IB
- * and execute them. Generally userspace acceleration drivers
- * produce command buffers which are send to the kernel and
- * put in IBs for execution by the requested ring.
- */
-static int radeon_debugfs_sa_init(struct radeon_device *rdev);
-
-/**
- * radeon_ib_get - request an IB (Indirect Buffer)
- *
- * @rdev: radeon_device pointer
- * @ring: ring index the IB is associated with
- * @ib: IB object returned
- * @size: requested IB size
- *
- * Request an IB (all asics). IBs are allocated using the
- * suballocator.
- * Returns 0 on success, error on failure.
- */
-int radeon_ib_get(struct radeon_device *rdev, int ring,
- struct radeon_ib *ib, struct radeon_vm *vm,
- unsigned size)
-{
- int r;
-
- r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
- if (r) {
- dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
- return r;
- }
-
- r = radeon_semaphore_create(rdev, &ib->semaphore);
- if (r) {
- return r;
- }
-
- ib->ring = ring;
- ib->fence = NULL;
- ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
- ib->vm = vm;
- if (vm) {
- /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
- * space and soffset is the offset inside the pool bo
- */
- ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
- } else {
- ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
- }
- ib->is_const_ib = false;
-
- return 0;
-}
-
-/**
- * radeon_ib_free - free an IB (Indirect Buffer)
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to free
- *
- * Free an IB (all asics).
- */
-void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
-{
- radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
- radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
- radeon_fence_unref(&ib->fence);
-}
-
-/**
- * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- * @const_ib: Const IB to schedule (SI only)
- *
- * Schedule an IB on the associated ring (all asics).
- * Returns 0 on success, error on failure.
- *
- * On SI, there are two parallel engines fed from the primary ring,
- * the CE (Constant Engine) and the DE (Drawing Engine). Since
- * resource descriptors have moved to memory, the CE allows you to
- * prime the caches while the DE is updating register state so that
- * the resource descriptors will be already in cache when the draw is
- * processed. To accomplish this, the userspace driver submits two
- * IBs, one for the CE and one for the DE. If there is a CE IB (called
- * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
- * to SI there was just a DE IB.
- */
-int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
- struct radeon_ib *const_ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
- int r = 0;
-
- if (!ib->length_dw || !ring->ready) {
- /* TODO: Nothings in the ib we should report. */
- dev_err(rdev->dev, "couldn't schedule ib\n");
- return -EINVAL;
- }
-
- /* 64 dwords should be enough for fence too */
- r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
- if (r) {
- dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
- return r;
- }
-
- /* grab a vm id if necessary */
- if (ib->vm) {
- struct radeon_fence *vm_id_fence;
- vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
- radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
- }
-
- /* sync with other rings */
- r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
- if (r) {
- dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
-
- if (ib->vm)
- radeon_vm_flush(rdev, ib->vm, ib->ring);
-
- if (const_ib) {
- radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
- radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
- }
- radeon_ring_ib_execute(rdev, ib->ring, ib);
- r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
- if (r) {
- dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
- if (const_ib) {
- const_ib->fence = radeon_fence_ref(ib->fence);
- }
-
- if (ib->vm)
- radeon_vm_fence(rdev, ib->vm, ib->fence);
-
- radeon_ring_unlock_commit(rdev, ring);
- return 0;
-}
-
-/**
- * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
- *
- * @rdev: radeon_device pointer
- *
- * Initialize the suballocator to manage a pool of memory
- * for use as IBs (all asics).
- * Returns 0 on success, error on failure.
- */
-int radeon_ib_pool_init(struct radeon_device *rdev)
-{
- int r;
-
- if (rdev->ib_pool_ready) {
- return 0;
- }
- r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
- RADEON_IB_POOL_SIZE*64*1024,
- RADEON_GPU_PAGE_SIZE,
- RADEON_GEM_DOMAIN_GTT);
- if (r) {
- return r;
- }
-
- r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
- if (r) {
- return r;
- }
-
- rdev->ib_pool_ready = true;
- if (radeon_debugfs_sa_init(rdev)) {
- dev_err(rdev->dev, "failed to register debugfs file for SA\n");
- }
- return 0;
-}
-
-/**
- * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
- *
- * @rdev: radeon_device pointer
- *
- * Tear down the suballocator managing the pool of memory
- * for use as IBs (all asics).
- */
-void radeon_ib_pool_fini(struct radeon_device *rdev)
-{
- if (rdev->ib_pool_ready) {
- radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
- radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
- rdev->ib_pool_ready = false;
- }
-}
-
-/**
- * radeon_ib_ring_tests - test IBs on the rings
- *
- * @rdev: radeon_device pointer
- *
- * Test an IB (Indirect Buffer) on each ring.
- * If the test fails, disable the ring.
- * Returns 0 on success, error if the primary GFX ring
- * IB test fails.
- */
-int radeon_ib_ring_tests(struct radeon_device *rdev)
-{
- unsigned i;
- int r;
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- struct radeon_ring *ring = &rdev->ring[i];
-
- if (!ring->ready)
- continue;
-
- r = radeon_ib_test(rdev, i, ring);
- if (r) {
- ring->ready = false;
- rdev->needs_reset = false;
-
- if (i == RADEON_RING_TYPE_GFX_INDEX) {
- /* oh, oh, that's really bad */
- DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
- rdev->accel_working = false;
- return r;
-
- } else {
- /* still not good, but we can live with it */
- DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
- }
- }
- }
- return 0;
-}
/*
* Rings
*/
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
+ /* If we are emitting the HDP flush via the ring buffer, we need to
+ * do it before padding.
+ */
+ if (rdev->asic->ring[ring->idx]->hdp_flush)
+ rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
/* We pad to match fetch size */
while (ring->wptr & ring->align_mask) {
radeon_ring_write(ring, ring->nop);
}
mb();
+ /* If we are emitting the HDP flush via MMIO, we need to do it after
+ * all CPU writes to VRAM finished.
+ */
+ if (rdev->asic->mmio_hdp_flush)
+ rdev->asic->mmio_hdp_flush(rdev);
radeon_ring_set_wptr(rdev, ring);
}
if (ring->ring_obj == NULL) {
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
+ (rdev->flags & RADEON_IS_PCIE) ?
+ RADEON_GEM_GTT_WC : 0,
NULL, &ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
{"radeon_ring_vce2", radeon_debugfs_ring_info, 0, &si_vce2_index},
};
-static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
-
- return 0;
-
-}
-
-static struct drm_info_list radeon_debugfs_sa_list[] = {
- {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
-};
-
#endif
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
#endif
return 0;
}
-
-static int radeon_debugfs_sa_init(struct radeon_device *rdev)
-{
-#if defined(CONFIG_DEBUG_FS)
- return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
-#else
- return 0;
-#endif
-}
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- unsigned size, u32 align, u32 domain)
+ unsigned size, u32 align, u32 domain, u32 flags)
{
int i, r;
}
r = radeon_bo_create(rdev, size, align, true,
- domain, NULL, &sa_manager->bo);
+ domain, flags, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
- n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- n -= rdev->ring[i].ring_size;
- if (rdev->wb.wb_obj)
- n -= RADEON_GPU_PAGE_SIZE;
- if (rdev->ih.ring_obj)
- n -= rdev->ih.ring_size;
+ n = rdev->mc.gtt_size - rdev->gart_pin_size;
n /= size;
gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
}
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- NULL, &vram_obj);
+ 0, NULL, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
struct radeon_fence *fence = NULL;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
+ RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean;
),
TP_fast_assign(
- __entry->soffset = bo_va->soffset;
- __entry->eoffset = bo_va->eoffset;
+ __entry->soffset = bo_va->it.start;
+ __entry->eoffset = bo_va->it.last + 1;
__entry->flags = bo_va->flags;
),
TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
__entry->flags, __entry->count)
);
+TRACE_EVENT(radeon_vm_flush,
+ TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
+ TP_ARGS(pd_addr, ring, id),
+ TP_STRUCT__entry(
+ __field(u64, pd_addr)
+ __field(u32, ring)
+ __field(u32, id)
+ ),
+
+ TP_fast_assign(
+ __entry->pd_addr = pd_addr;
+ __entry->ring = ring;
+ __entry->id = id;
+ ),
+ TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
+ __entry->pd_addr, __entry->ring, __entry->id)
+);
+
DECLARE_EVENT_CLASS(radeon_fence_request,
TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
struct ttm_mem_reg *bo_mem)
{
struct radeon_ttm_tt *gtt = (void*)ttm;
+ uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
+ RADEON_GART_PAGE_WRITE;
int r;
gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
}
- r = radeon_gart_bind(gtt->rdev, gtt->offset,
- ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
+ if (ttm->caching_state == tt_cached)
+ flags |= RADEON_GART_PAGE_SNOOP;
+ r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
+ ttm->pages, gtt->ttm.dma_address, flags);
if (r) {
DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
ttm->num_pages, (unsigned)gtt->offset);
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM,
+ RADEON_GEM_DOMAIN_VRAM, 0,
NULL, &rdev->stollen_vga_memory);
if (r) {
return r;
--- /dev/null
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_ucode.h"
+
+static void radeon_ucode_print_common_hdr(const struct common_firmware_header *hdr)
+{
+ DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
+ DRM_DEBUG("header_size_bytes: %u\n", le32_to_cpu(hdr->header_size_bytes));
+ DRM_DEBUG("header_version_major: %u\n", le16_to_cpu(hdr->header_version_major));
+ DRM_DEBUG("header_version_minor: %u\n", le16_to_cpu(hdr->header_version_minor));
+ DRM_DEBUG("ip_version_major: %u\n", le16_to_cpu(hdr->ip_version_major));
+ DRM_DEBUG("ip_version_minor: %u\n", le16_to_cpu(hdr->ip_version_minor));
+ DRM_DEBUG("ucode_version: 0x%08x\n", le32_to_cpu(hdr->ucode_version));
+ DRM_DEBUG("ucode_size_bytes: %u\n", le32_to_cpu(hdr->ucode_size_bytes));
+ DRM_DEBUG("ucode_array_offset_bytes: %u\n",
+ le32_to_cpu(hdr->ucode_array_offset_bytes));
+ DRM_DEBUG("crc32: 0x%08x\n", le32_to_cpu(hdr->crc32));
+}
+
+void radeon_ucode_print_mc_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("MC\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct mc_firmware_header_v1_0 *mc_hdr =
+ container_of(hdr, struct mc_firmware_header_v1_0, header);
+
+ DRM_DEBUG("io_debug_size_bytes: %u\n",
+ le32_to_cpu(mc_hdr->io_debug_size_bytes));
+ DRM_DEBUG("io_debug_array_offset_bytes: %u\n",
+ le32_to_cpu(mc_hdr->io_debug_array_offset_bytes));
+ } else {
+ DRM_ERROR("Unknown MC ucode version: %u.%u\n", version_major, version_minor);
+ }
+}
+
+void radeon_ucode_print_smc_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("SMC\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct smc_firmware_header_v1_0 *smc_hdr =
+ container_of(hdr, struct smc_firmware_header_v1_0, header);
+
+ DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr->ucode_start_addr));
+ } else {
+ DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
+ }
+}
+
+void radeon_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("GFX\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct gfx_firmware_header_v1_0 *gfx_hdr =
+ container_of(hdr, struct gfx_firmware_header_v1_0, header);
+
+ DRM_DEBUG("ucode_feature_version: %u\n",
+ le32_to_cpu(gfx_hdr->ucode_feature_version));
+ DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr->jt_offset));
+ DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr->jt_size));
+ } else {
+ DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor);
+ }
+}
+
+void radeon_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("RLC\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct rlc_firmware_header_v1_0 *rlc_hdr =
+ container_of(hdr, struct rlc_firmware_header_v1_0, header);
+
+ DRM_DEBUG("ucode_feature_version: %u\n",
+ le32_to_cpu(rlc_hdr->ucode_feature_version));
+ DRM_DEBUG("save_and_restore_offset: %u\n",
+ le32_to_cpu(rlc_hdr->save_and_restore_offset));
+ DRM_DEBUG("clear_state_descriptor_offset: %u\n",
+ le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
+ DRM_DEBUG("avail_scratch_ram_locations: %u\n",
+ le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
+ DRM_DEBUG("master_pkt_description_offset: %u\n",
+ le32_to_cpu(rlc_hdr->master_pkt_description_offset));
+ } else {
+ DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
+ }
+}
+
+void radeon_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("SDMA\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct sdma_firmware_header_v1_0 *sdma_hdr =
+ container_of(hdr, struct sdma_firmware_header_v1_0, header);
+
+ DRM_DEBUG("ucode_feature_version: %u\n",
+ le32_to_cpu(sdma_hdr->ucode_feature_version));
+ DRM_DEBUG("ucode_change_version: %u\n",
+ le32_to_cpu(sdma_hdr->ucode_change_version));
+ DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(sdma_hdr->jt_offset));
+ DRM_DEBUG("jt_size: %u\n", le32_to_cpu(sdma_hdr->jt_size));
+ } else {
+ DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
+ version_major, version_minor);
+ }
+}
+
+int radeon_ucode_validate(const struct firmware *fw)
+{
+ const struct common_firmware_header *hdr =
+ (const struct common_firmware_header *)fw->data;
+
+ if (fw->size == le32_to_cpu(hdr->size_bytes))
+ return 0;
+
+ return -EINVAL;
+}
+
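/* A minimal sketch of how a chip init path might consume these helpers
 * after request_firmware(); pfp_fw and new_fw follow the existing driver
 * fields, everything else is illustrative:
 *
 *	err = radeon_ucode_validate(rdev->pfp_fw);
 *	if (!err) {
 *		radeon_ucode_print_gfx_hdr((const struct common_firmware_header *)
 *					   rdev->pfp_fw->data);
 *		rdev->new_fw = true;
 *	}
 */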
#define HAWAII_SMC_UCODE_START 0x20000
#define HAWAII_SMC_UCODE_SIZE 0x1FDEC
+struct common_firmware_header {
+ uint32_t size_bytes; /* size of the entire header+image(s) in bytes */
+ uint32_t header_size_bytes; /* size of just the header in bytes */
+ uint16_t header_version_major; /* header version */
+ uint16_t header_version_minor; /* header version */
+ uint16_t ip_version_major; /* IP version */
+ uint16_t ip_version_minor; /* IP version */
+ uint32_t ucode_version;
+ uint32_t ucode_size_bytes; /* size of ucode in bytes */
+ uint32_t ucode_array_offset_bytes; /* payload offset from the start of the header */
+ uint32_t crc32; /* crc32 checksum of the payload */
+};
+
+/* version_major=1, version_minor=0 */
+struct mc_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t io_debug_size_bytes; /* size of debug array in dwords */
+ uint32_t io_debug_array_offset_bytes; /* payload offset from the start of the header */
+};
+
+/* version_major=1, version_minor=0 */
+struct smc_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_start_addr;
+};
+
+/* version_major=1, version_minor=0 */
+struct gfx_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_feature_version;
+ uint32_t jt_offset; /* jt location */
+ uint32_t jt_size; /* size of jt */
+};
+
+/* version_major=1, version_minor=0 */
+struct rlc_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_feature_version;
+ uint32_t save_and_restore_offset;
+ uint32_t clear_state_descriptor_offset;
+ uint32_t avail_scratch_ram_locations;
+ uint32_t master_pkt_description_offset;
+};
+
+/* version_major=1, version_minor=0 */
+struct sdma_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_feature_version;
+ uint32_t ucode_change_version;
+ uint32_t jt_offset; /* jt location */
+ uint32_t jt_size; /* size of jt */
+};
+
+/* header is fixed size */
+union radeon_firmware_header {
+ struct common_firmware_header common;
+ struct mc_firmware_header_v1_0 mc;
+ struct smc_firmware_header_v1_0 smc;
+ struct gfx_firmware_header_v1_0 gfx;
+ struct rlc_firmware_header_v1_0 rlc;
+ struct sdma_firmware_header_v1_0 sdma;
+ uint8_t raw[0x100];
+};
+
+void radeon_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_gfx_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
+int radeon_ucode_validate(const struct firmware *fw);
+
#endif
bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->uvd.vcpu_bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
int r, i;
r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
if (r)
return r;
int r, i;
r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
if (r)
return r;
size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) +
RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->vce.vcpu_bo);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->vce.vcpu_bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
return r;
uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
/* if we can't remember our last VM flush then flush now! */
- /* XXX figure out why we have to flush all the time */
- if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) {
+ if (!vm->last_flush || pd_addr != vm->pd_gpu_addr) {
+ trace_radeon_vm_flush(pd_addr, ring, vm->id);
vm->pd_gpu_addr = pd_addr;
radeon_ring_vm_flush(rdev, ring, vm);
}
}
bo_va->vm = vm;
bo_va->bo = bo;
- bo_va->soffset = 0;
- bo_va->eoffset = 0;
+ bo_va->it.start = 0;
+ bo_va->it.last = 0;
bo_va->flags = 0;
- bo_va->valid = false;
+ bo_va->addr = 0;
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
- INIT_LIST_HEAD(&bo_va->vm_list);
INIT_LIST_HEAD(&bo_va->vm_status);
mutex_lock(&vm->mutex);
- list_add(&bo_va->vm_list, &vm->va);
list_add_tail(&bo_va->bo_list, &bo->va);
mutex_unlock(&vm->mutex);
}
/**
+ * radeon_vm_set_pages - helper to call the right asic function
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the right asic functions
+ * to setup the page table using the DMA.
+ */
+static void radeon_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+ if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
+ uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
+ radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
+
+ } else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
+ radeon_asic_vm_write_pages(rdev, ib, pe, addr,
+ count, incr, flags);
+
+ } else {
+ radeon_asic_vm_set_pages(rdev, ib, pe, addr,
+ count, incr, flags);
+ }
+}
+
+/**
* radeon_vm_clear_bo - initially clear the page dir/table
*
* @rdev: radeon_device pointer
addr = radeon_bo_gpu_offset(bo);
entries = radeon_bo_size(bo) / 8;
- r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
- NULL, entries * 2 + 64);
+ r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
if (r)
goto error;
ib.length_dw = 0;
- radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);
+ radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
+ radeon_asic_vm_pad_ib(rdev, &ib);
+ WARN_ON(ib.length_dw > 64);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r)
uint32_t flags)
{
uint64_t size = radeon_bo_size(bo_va->bo);
- uint64_t eoffset, last_offset = 0;
struct radeon_vm *vm = bo_va->vm;
- struct radeon_bo_va *tmp;
- struct list_head *head;
unsigned last_pfn, pt_idx;
+ uint64_t eoffset;
int r;
if (soffset) {
}
mutex_lock(&vm->mutex);
- head = &vm->va;
- last_offset = 0;
- list_for_each_entry(tmp, &vm->va, vm_list) {
- if (bo_va == tmp) {
- /* skip over currently modified bo */
- continue;
+ if (bo_va->it.start || bo_va->it.last) {
+ if (bo_va->addr) {
+ /* add a clone of the bo_va to clear the old address */
+ struct radeon_bo_va *tmp;
+ tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ if (!tmp) {
+ mutex_unlock(&vm->mutex);
+ return -ENOMEM;
+ }
+ tmp->it.start = bo_va->it.start;
+ tmp->it.last = bo_va->it.last;
+ tmp->vm = vm;
+ tmp->addr = bo_va->addr;
+ tmp->bo = radeon_bo_ref(bo_va->bo);
+ list_add(&tmp->vm_status, &vm->freed);
}
- if (soffset >= last_offset && eoffset <= tmp->soffset) {
- /* bo can be added before this one */
- break;
- }
- if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
- /* bo and tmp overlap, invalid offset */
- dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
- bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
- (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
- mutex_unlock(&vm->mutex);
- return -EINVAL;
- }
- last_offset = tmp->eoffset;
- head = &tmp->vm_list;
+ interval_tree_remove(&bo_va->it, &vm->va);
+ bo_va->it.start = 0;
+ bo_va->it.last = 0;
}
- if (bo_va->soffset) {
- /* add a clone of the bo_va to clear the old address */
- tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
- if (!tmp) {
+ soffset /= RADEON_GPU_PAGE_SIZE;
+ eoffset /= RADEON_GPU_PAGE_SIZE;
+ if (soffset || eoffset) {
+ struct interval_tree_node *it;
+ it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
+ if (it) {
+ struct radeon_bo_va *tmp;
+ tmp = container_of(it, struct radeon_bo_va, it);
+ /* bo and tmp overlap, invalid offset */
+ dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
+ "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
+ soffset, tmp->bo, tmp->it.start, tmp->it.last);
mutex_unlock(&vm->mutex);
- return -ENOMEM;
+ return -EINVAL;
}
- tmp->soffset = bo_va->soffset;
- tmp->eoffset = bo_va->eoffset;
- tmp->vm = vm;
- list_add(&tmp->vm_status, &vm->freed);
+ bo_va->it.start = soffset;
+ bo_va->it.last = eoffset - 1;
+ interval_tree_insert(&bo_va->it, &vm->va);
}
- bo_va->soffset = soffset;
- bo_va->eoffset = eoffset;
bo_va->flags = flags;
- bo_va->valid = false;
- list_move(&bo_va->vm_list, head);
+ bo_va->addr = 0;
- soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
- eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
+ soffset >>= radeon_vm_block_size;
+ eoffset >>= radeon_vm_block_size;
BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));
r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
RADEON_GPU_PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &pt);
if (r)
return r;
ndw = 64;
/* assume the worst case */
- ndw += vm->max_pde_used * 16;
+ ndw += vm->max_pde_used * 6;
/* update too big for an IB */
if (ndw > 0xfffff)
((last_pt + incr * count) != pt)) {
if (count) {
- radeon_asic_vm_set_page(rdev, &ib, last_pde,
- last_pt, count, incr,
- R600_PTE_VALID);
+ radeon_vm_set_pages(rdev, &ib, last_pde,
+ last_pt, count, incr,
+ R600_PTE_VALID);
}
count = 1;
}
if (count)
- radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
- incr, R600_PTE_VALID);
+ radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
+ incr, R600_PTE_VALID);
if (ib.length_dw != 0) {
+ radeon_asic_vm_pad_ib(rdev, &ib);
radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
+ WARN_ON(ib.length_dw > ndw);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_ib_free(rdev, &ib);
(frag_start >= frag_end)) {
count = (pe_end - pe_start) / 8;
- radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
- RADEON_GPU_PAGE_SIZE, flags);
+ radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags);
return;
}
/* handle the 4K area at the beginning */
if (pe_start != frag_start) {
count = (frag_start - pe_start) / 8;
- radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
- RADEON_GPU_PAGE_SIZE, flags);
+ radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags);
addr += RADEON_GPU_PAGE_SIZE * count;
}
/* handle the area in the middle */
count = (frag_end - frag_start) / 8;
- radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count,
- RADEON_GPU_PAGE_SIZE, flags | frag_flags);
+ radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags | frag_flags);
/* handle the 4K area at the end */
if (frag_end != pe_end) {
addr += RADEON_GPU_PAGE_SIZE * count;
count = (pe_end - frag_end) / 8;
- radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count,
- RADEON_GPU_PAGE_SIZE, flags);
+ radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags);
}
}
unsigned count = 0;
uint64_t addr;
- start = start / RADEON_GPU_PAGE_SIZE;
- end = end / RADEON_GPU_PAGE_SIZE;
-
/* walk over the address space and update the page tables */
for (addr = start; addr < end; ) {
uint64_t pt_idx = addr >> radeon_vm_block_size;
{
struct radeon_vm *vm = bo_va->vm;
struct radeon_ib ib;
- unsigned nptes, ndw;
+ unsigned nptes, ncmds, ndw;
uint64_t addr;
+ uint32_t flags;
int r;
-
- if (!bo_va->soffset) {
+ if (!bo_va->it.start) {
dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
bo_va->bo, vm);
return -EINVAL;
}
- if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
- return 0;
+ list_del_init(&bo_va->vm_status);
bo_va->flags &= ~RADEON_VM_PAGE_VALID;
bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
+ bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
if (mem) {
addr = mem->start << PAGE_SHIFT;
if (mem->mem_type != TTM_PL_SYSTEM) {
bo_va->flags |= RADEON_VM_PAGE_VALID;
- bo_va->valid = true;
}
if (mem->mem_type == TTM_PL_TT) {
bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+ if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
+ bo_va->flags |= RADEON_VM_PAGE_SNOOPED;
+
} else {
addr += rdev->vm_manager.vram_base_offset;
}
} else {
addr = 0;
- bo_va->valid = false;
}
+ if (addr == bo_va->addr)
+ return 0;
+ bo_va->addr = addr;
+
trace_radeon_vm_bo_update(bo_va);
- nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
+ nptes = bo_va->it.last - bo_va->it.start + 1;
+
+ /* reserve space for one command every (1 << BLOCK_SIZE) entries
+ or 2k dwords (whatever is smaller) */
+ ncmds = (nptes >> min(radeon_vm_block_size, 11)) + 1;
/* padding, etc. */
ndw = 64;
- if (radeon_vm_block_size > 11)
- /* reserve space for one header for every 2k dwords */
- ndw += (nptes >> 11) * 4;
- else
- /* reserve space for one header for
- every (1 << BLOCK_SIZE) entries */
- ndw += (nptes >> radeon_vm_block_size) * 4;
+ flags = radeon_vm_page_flags(bo_va->flags);
+ if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
+ /* only copy commands needed */
+ ndw += ncmds * 7;
- /* reserve space for pte addresses */
- ndw += nptes * 2;
+ } else if (flags & R600_PTE_SYSTEM) {
+ /* header for write data commands */
+ ndw += ncmds * 4;
+
+ /* body of write data command */
+ ndw += nptes * 2;
+
+ } else {
+ /* set page commands needed */
+ ndw += ncmds * 10;
+
+ /* two extra commands for begin/end of fragment */
+ ndw += 2 * 10;
+ }
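/* A worked example of the sizing above, assuming radeon_vm_block_size = 9
 * and a 2MB mapping (nptes = 512): ncmds = (512 >> 9) + 1 = 2, so the
 * write-data path reserves ndw = 64 + 2 * 4 + 512 * 2 = 1096 dwords and
 * the set-pages path reserves 64 + 2 * 10 + 2 * 10 = 104 dwords. */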
/* update too big for an IB */
if (ndw > 0xfffff)
return r;
ib.length_dw = 0;
- radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
- addr, radeon_vm_page_flags(bo_va->flags));
+ radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
+ bo_va->it.last + 1, addr,
+ radeon_vm_page_flags(bo_va->flags));
+
+ radeon_asic_vm_pad_ib(rdev, &ib);
+ WARN_ON(ib.length_dw > ndw);
radeon_semaphore_sync_to(ib.semaphore, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL);
int r;
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
- list_del(&bo_va->vm_status);
r = radeon_vm_bo_update(rdev, bo_va, NULL);
+ radeon_bo_unref(&bo_va->bo);
kfree(bo_va);
if (r)
return r;
}
/**
+ * radeon_vm_clear_invalids - clear invalidated BOs in the PT
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all invalidated BOs are cleared in the PT.
+ * Returns 0 for success.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int radeon_vm_clear_invalids(struct radeon_device *rdev,
+ struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+ int r;
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) {
+ r = radeon_vm_bo_update(rdev, bo_va, NULL);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
+/**
* radeon_vm_bo_rmv - remove a bo to a specific vm
*
* @rdev: radeon_device pointer
list_del(&bo_va->bo_list);
mutex_lock(&vm->mutex);
- list_del(&bo_va->vm_list);
+ interval_tree_remove(&bo_va->it, &vm->va);
+ list_del(&bo_va->vm_status);
- if (bo_va->soffset) {
- bo_va->bo = NULL;
+ if (bo_va->addr) {
+ bo_va->bo = radeon_bo_ref(bo_va->bo);
list_add(&bo_va->vm_status, &vm->freed);
} else {
kfree(bo_va);
struct radeon_bo_va *bo_va;
list_for_each_entry(bo_va, &bo->va, bo_list) {
- bo_va->valid = false;
+ if (bo_va->addr) {
+ mutex_lock(&bo_va->vm->mutex);
+ list_del(&bo_va->vm_status);
+ list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
+ mutex_unlock(&bo_va->vm->mutex);
+ }
}
}
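For context, the interval tree that replaces the per-VM linked list indexes each mapping by its GPU page range (it.start/it.last, the same units used for nptes above). A minimal sketch of an overlap lookup against it, assuming the standard <linux/interval_tree.h> helpers and page-granular soffset/eoffset bounds; the lookup itself is illustrative, not part of this patch:

	/* illustrative only: find a mapping overlapping [soffset, eoffset) in GPU pages */
	struct interval_tree_node *node;

	node = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
	if (node) {
		struct radeon_bo_va *conflict =
			container_of(node, struct radeon_bo_va, it);
		/* the requested range is already covered by conflict->bo */
	}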
vm->last_flush = NULL;
vm->last_id_use = NULL;
mutex_init(&vm->mutex);
- INIT_LIST_HEAD(&vm->va);
+ vm->va = RB_ROOT;
+ INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->freed);
pd_size = radeon_vm_directory_size(rdev);
}
r = radeon_bo_create(rdev, pd_size, align, true,
- RADEON_GEM_DOMAIN_VRAM, NULL,
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
&vm->page_directory);
if (r)
return r;
struct radeon_bo_va *bo_va, *tmp;
int i, r;
- if (!list_empty(&vm->va)) {
+ if (!RB_EMPTY_ROOT(&vm->va)) {
dev_err(rdev->dev, "still active bo inside vm\n");
}
- list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
- list_del_init(&bo_va->vm_list);
+ rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) {
+ interval_tree_remove(&bo_va->it, &vm->va);
r = radeon_bo_reserve(bo_va->bo, false);
if (!r) {
list_del_init(&bo_va->bo_list);
kfree(bo_va);
}
}
- list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
+ list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+ radeon_bo_unref(&bo_va->bo);
kfree(bo_va);
+ }
for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
radeon_bo_unref(&vm->page_tables[i].bo);
uint32_t size_reg;
uint32_t tmp;
- radeon_gart_restore(rdev);
tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
radeon_gart_table_ram_free(rdev);
}
+#define RS400_PTE_UNSNOOPED (1 << 0)
#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE (1 << 3)
-void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t addr, uint32_t flags)
{
uint32_t entry;
u32 *gtt = rdev->gart.ptr;
entry = (lower_32_bits(addr) & PAGE_MASK) |
- ((upper_32_bits(addr) & 0xff) << 4) |
- RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
+ ((upper_32_bits(addr) & 0xff) << 4);
+ if (flags & RADEON_GART_PAGE_READ)
+ entry |= RS400_PTE_READABLE;
+ if (flags & RADEON_GART_PAGE_WRITE)
+ entry |= RS400_PTE_WRITEABLE;
+ if (!(flags & RADEON_GART_PAGE_SNOOP))
+ entry |= RS400_PTE_UNSNOOPED;
entry = cpu_to_le32(entry);
gtt[i] = entry;
}
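A hypothetical caller of the per-ASIC hook above would build the flags word from the generic RADEON_GART_PAGE_* bits and leave the hardware encoding to the hook, roughly:

	/* illustrative only: bind page i as a readable, writable, cached (snooped) page */
	uint32_t flags = RADEON_GART_PAGE_READ | RADEON_GART_PAGE_WRITE |
			 RADEON_GART_PAGE_SNOOP;

	rs400_gart_set_page(rdev, i, dma_addr, flags);
	/* omitting RADEON_GART_PAGE_SNOOP would set RS400_PTE_UNSNOOPED instead */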
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Enable bus master */
tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
WREG32(RADEON_BUS_CNTL, tmp);
radeon_gart_table_vram_free(rdev);
}
-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t addr, uint32_t flags)
{
void __iomem *ptr = (void *)rdev->gart.ptr;
addr = addr & 0xFFFFFFFFFFFFF000ULL;
- if (addr == rdev->dummy_page.addr)
- addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
- else
- addr |= R600_PTE_GART;
+ addr |= R600_PTE_SYSTEM;
+ if (flags & RADEON_GART_PAGE_VALID)
+ addr |= R600_PTE_VALID;
+ if (flags & RADEON_GART_PAGE_READ)
+ addr |= R600_PTE_READABLE;
+ if (flags & RADEON_GART_PAGE_WRITE)
+ addr |= R600_PTE_WRITEABLE;
+ if (flags & RADEON_GART_PAGE_SNOOP)
+ addr |= R600_PTE_SNOOPED;
writeq(addr, ptr + (i * 8));
}
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
+
+MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
+MODULE_FIRMWARE("radeon/tahiti_me.bin");
+MODULE_FIRMWARE("radeon/tahiti_ce.bin");
+MODULE_FIRMWARE("radeon/tahiti_mc.bin");
+MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
+MODULE_FIRMWARE("radeon/tahiti_smc.bin");
+
MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
+
+MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
+MODULE_FIRMWARE("radeon/pitcairn_me.bin");
+MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
+MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
+
MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
MODULE_FIRMWARE("radeon/VERDE_smc.bin");
+
+MODULE_FIRMWARE("radeon/verde_pfp.bin");
+MODULE_FIRMWARE("radeon/verde_me.bin");
+MODULE_FIRMWARE("radeon/verde_ce.bin");
+MODULE_FIRMWARE("radeon/verde_mc.bin");
+MODULE_FIRMWARE("radeon/verde_rlc.bin");
+MODULE_FIRMWARE("radeon/verde_smc.bin");
+
MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
MODULE_FIRMWARE("radeon/OLAND_me.bin");
MODULE_FIRMWARE("radeon/OLAND_ce.bin");
MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
MODULE_FIRMWARE("radeon/OLAND_smc.bin");
+
+MODULE_FIRMWARE("radeon/oland_pfp.bin");
+MODULE_FIRMWARE("radeon/oland_me.bin");
+MODULE_FIRMWARE("radeon/oland_ce.bin");
+MODULE_FIRMWARE("radeon/oland_mc.bin");
+MODULE_FIRMWARE("radeon/oland_rlc.bin");
+MODULE_FIRMWARE("radeon/oland_smc.bin");
+
MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
MODULE_FIRMWARE("radeon/HAINAN_me.bin");
MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_pfp.bin");
+MODULE_FIRMWARE("radeon/hainan_me.bin");
+MODULE_FIRMWARE("radeon/hainan_ce.bin");
+MODULE_FIRMWARE("radeon/hainan_mc.bin");
+MODULE_FIRMWARE("radeon/hainan_rlc.bin");
+MODULE_FIRMWARE("radeon/hainan_smc.bin");
+
static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
static void si_pcie_gen3_enable(struct radeon_device *rdev);
static void si_program_aspm(struct radeon_device *rdev);
/* ucode loading */
int si_mc_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
+ const __be32 *fw_data = NULL;
+ const __le32 *new_fw_data = NULL;
u32 running, blackout = 0;
- u32 *io_mc_regs;
+ u32 *io_mc_regs = NULL;
+ const __le32 *new_io_mc_regs = NULL;
int i, regs_size, ucode_size;
if (!rdev->mc_fw)
return -EINVAL;
- ucode_size = rdev->mc_fw->size / 4;
+ if (rdev->new_fw) {
+ const struct mc_firmware_header_v1_0 *hdr =
+ (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
+
+ radeon_ucode_print_mc_hdr(&hdr->header);
+ regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
+ new_io_mc_regs = (const __le32 *)
+ (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ new_fw_data = (const __le32 *)
+ (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ } else {
+ ucode_size = rdev->mc_fw->size / 4;
- switch (rdev->family) {
- case CHIP_TAHITI:
- io_mc_regs = (u32 *)&tahiti_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_PITCAIRN:
- io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_VERDE:
- default:
- io_mc_regs = (u32 *)&verde_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_OLAND:
- io_mc_regs = (u32 *)&oland_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_HAINAN:
- io_mc_regs = (u32 *)&hainan_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ io_mc_regs = (u32 *)&tahiti_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_PITCAIRN:
+ io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_VERDE:
+ default:
+ io_mc_regs = (u32 *)&verde_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_OLAND:
+ io_mc_regs = (u32 *)&oland_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_HAINAN:
+ io_mc_regs = (u32 *)&hainan_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ }
+ fw_data = (const __be32 *)rdev->mc_fw->data;
}
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
/* load mc io regs */
for (i = 0; i < regs_size; i++) {
- WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
- WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ if (rdev->new_fw) {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
+ WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
+ } else {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ }
}
/* load the MC ucode */
- fw_data = (const __be32 *)rdev->mc_fw->data;
- for (i = 0; i < ucode_size; i++)
- WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+ for (i = 0; i < ucode_size; i++) {
+ if (rdev->new_fw)
+ WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
+ else
+ WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+ }
/* put the engine back into the active state */
WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
static int si_init_microcode(struct radeon_device *rdev)
{
const char *chip_name;
- const char *rlc_chip_name;
+ const char *new_chip_name;
size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
size_t smc_req_size, mc2_req_size;
char fw_name[30];
int err;
+ int new_fw = 0;
DRM_DEBUG("\n");
switch (rdev->family) {
case CHIP_TAHITI:
chip_name = "TAHITI";
- rlc_chip_name = "TAHITI";
+ new_chip_name = "tahiti";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
break;
case CHIP_PITCAIRN:
chip_name = "PITCAIRN";
- rlc_chip_name = "PITCAIRN";
+ new_chip_name = "pitcairn";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
break;
case CHIP_VERDE:
chip_name = "VERDE";
- rlc_chip_name = "VERDE";
+ new_chip_name = "verde";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
break;
case CHIP_OLAND:
chip_name = "OLAND";
- rlc_chip_name = "OLAND";
+ new_chip_name = "oland";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
break;
case CHIP_HAINAN:
chip_name = "HAINAN";
- rlc_chip_name = "HAINAN";
+ new_chip_name = "hainan";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
default: BUG();
}
- DRM_INFO("Loading %s Microcode\n", chip_name);
+ DRM_INFO("Loading %s Microcode\n", new_chip_name);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->pfp_fw->size != pfp_req_size) {
- printk(KERN_ERR
- "si_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->pfp_fw->size, fw_name);
- err = -EINVAL;
- goto out;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->pfp_fw->size != pfp_req_size) {
+ printk(KERN_ERR
+ "si_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->pfp_fw->size, fw_name);
+ err = -EINVAL;
+ goto out;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->pfp_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->me_fw->size != me_req_size) {
- printk(KERN_ERR
- "si_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->me_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->me_fw->size != me_req_size) {
+ printk(KERN_ERR
+ "si_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->me_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->me_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->ce_fw->size != ce_req_size) {
- printk(KERN_ERR
- "si_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->ce_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->ce_fw->size != ce_req_size) {
+ printk(KERN_ERR
+ "si_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->ce_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->ce_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->rlc_fw->size != rlc_req_size) {
- printk(KERN_ERR
- "si_rlc: Bogus length %zu in firmware \"%s\"\n",
- rdev->rlc_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->rlc_fw->size != rlc_req_size) {
+ printk(KERN_ERR
+ "si_rlc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->rlc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->rlc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
- if (err)
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ }
+ if ((rdev->mc_fw->size != mc_req_size) &&
+ (rdev->mc_fw->size != mc2_req_size)) {
+ printk(KERN_ERR
+ "si_mc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->mc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
+ } else {
+ err = radeon_ucode_validate(rdev->mc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
goto out;
+ } else {
+ new_fw++;
+ }
}
- if ((rdev->mc_fw->size != mc_req_size) &&
- (rdev->mc_fw->size != mc2_req_size)) {
- printk(KERN_ERR
- "si_mc: Bogus length %zu in firmware \"%s\"\n",
- rdev->mc_fw->size, fw_name);
- err = -EINVAL;
- }
- DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
- printk(KERN_ERR
- "smc: error loading firmware \"%s\"\n",
- fw_name);
- release_firmware(rdev->smc_fw);
- rdev->smc_fw = NULL;
- err = 0;
- } else if (rdev->smc_fw->size != smc_req_size) {
- printk(KERN_ERR
- "si_smc: Bogus length %zu in firmware \"%s\"\n",
- rdev->smc_fw->size, fw_name);
- err = -EINVAL;
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ err = 0;
+ } else if (rdev->smc_fw->size != smc_req_size) {
+ printk(KERN_ERR
+ "si_smc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->smc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->smc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
+ if (new_fw == 0) {
+ rdev->new_fw = false;
+ } else if (new_fw < 6) {
+ printk(KERN_ERR "si_fw: mixing new and old firmware!\n");
+ err = -EINVAL;
+ } else {
+ rdev->new_fw = true;
+ }
out:
if (err) {
if (err != -EINVAL)
static int si_cp_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
int i;
- if (!rdev->me_fw || !rdev->pfp_fw)
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
return -EINVAL;
si_cp_enable(rdev, false);
- /* PFP */
- fw_data = (const __be32 *)rdev->pfp_fw->data;
- WREG32(CP_PFP_UCODE_ADDR, 0);
- for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
- WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_PFP_UCODE_ADDR, 0);
-
- /* CE */
- fw_data = (const __be32 *)rdev->ce_fw->data;
- WREG32(CP_CE_UCODE_ADDR, 0);
- for (i = 0; i < SI_CE_UCODE_SIZE; i++)
- WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_CE_UCODE_ADDR, 0);
-
- /* ME */
- fw_data = (const __be32 *)rdev->me_fw->data;
- WREG32(CP_ME_RAM_WADDR, 0);
- for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
- WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_ME_RAM_WADDR, 0);
+ if (rdev->new_fw) {
+ const struct gfx_firmware_header_v1_0 *pfp_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
+ const struct gfx_firmware_header_v1_0 *ce_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
+ const struct gfx_firmware_header_v1_0 *me_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
+ radeon_ucode_print_gfx_hdr(&ce_hdr->header);
+ radeon_ucode_print_gfx_hdr(&me_hdr->header);
+
+ /* PFP */
+ fw_data = (const __le32 *)
+ (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __le32 *)
+ (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+ fw_data = (const __le32 *)
+ (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
+ } else {
+ const __be32 *fw_data;
+
+ /* PFP */
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __be32 *)rdev->ce_fw->data;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < SI_CE_UCODE_SIZE; i++)
+ WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
+ }
WREG32(CP_PFP_UCODE_ADDR, 0);
WREG32(CP_CE_UCODE_ADDR, 0);
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
/* write new base address */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
if (vm->id < 8) {
static int si_rlc_resume(struct radeon_device *rdev)
{
u32 i;
- const __be32 *fw_data;
if (!rdev->rlc_fw)
return -EINVAL;
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
- fw_data = (const __be32 *)rdev->rlc_fw->data;
- for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
- WREG32(RLC_UCODE_ADDR, i);
- WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ if (rdev->new_fw) {
+ const struct rlc_firmware_header_v1_0 *hdr =
+ (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
+ u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ const __le32 *fw_data = (const __le32 *)
+ (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ radeon_ucode_print_rlc_hdr(&hdr->header);
+
+ for (i = 0; i < fw_size; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
+ }
+ } else {
+ const __be32 *fw_data =
+ (const __be32 *)rdev->rlc_fw->data;
+ for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
}
WREG32(RLC_UCODE_ADDR, 0);
case 16: /* D5 page flip */
case 18: /* D6 page flip */
DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
- radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
break;
case 42: /* HPD hotplug */
switch (src_data) {
}
/**
- * si_dma_vm_set_page - update the page tables using the DMA
+ * si_dma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr where to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using the DMA (SI).
+ */
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count)
+{
+ while (count) {
+ unsigned bytes = count * 8;
+ if (bytes > 0xFFFF8)
+ bytes = 0xFFFF8;
+
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+ 1, 0, 0, bytes);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+ pe += bytes;
+ src += bytes;
+ count -= bytes / 8;
+ }
+}
+
+/**
+ * si_dma_vm_write_pages - update PTEs by writing them manually
*
* @rdev: radeon_device pointer
* @ib: indirect buffer to fill with commands
* @incr: increase next addr by incr bytes
* @flags: access flags
*
- * Update the page tables using the DMA (SI).
+ * Update PTEs by writing them manually using the DMA (SI).
*/
-void si_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
{
uint64_t value;
unsigned ndw;
- trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
- if (flags == R600_PTE_GART) {
- uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
- while (count) {
- unsigned bytes = count * 8;
- if (bytes > 0xFFFF8)
- bytes = 0xFFFF8;
-
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
- 1, 0, 0, bytes);
- ib->ptr[ib->length_dw++] = lower_32_bits(pe);
- ib->ptr[ib->length_dw++] = lower_32_bits(src);
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
-
- pe += bytes;
- src += bytes;
- count -= bytes / 8;
- }
- } else if (flags & R600_PTE_SYSTEM) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- if (flags & R600_PTE_VALID)
+ } else if (flags & R600_PTE_VALID) {
value = addr;
- else
+ } else {
value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
+ }
+ addr += incr;
+ value |= flags;
+ ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- pe += ndw * 4;
- addr += (ndw / 2) * incr;
- count -= ndw / 2;
}
}
- while (ib->length_dw & 0x7)
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+}
+
+/**
+ * si_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the DMA (SI).
+ */
+void si_dma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint64_t value;
+ unsigned ndw;
+
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & R600_PTE_VALID)
+ value = addr;
+ else
+ value = 0;
+
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
}
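The common VM code decides which of the three helpers to invoke; a minimal sketch of that dispatch, mirroring the flag test used for the IB size estimate earlier in the series (the wrapper logic here is illustrative; only the three si_dma_vm_*_pages helpers are part of the patch):

	/* illustrative only: pick copy/write/set based on the page flags */
	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
		/* PTEs already live in the GART table, just copy them */
		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
		si_dma_vm_copy_pages(rdev, ib, pe, src, count);
	} else if (flags & R600_PTE_SYSTEM) {
		/* system pages need a GART lookup per entry */
		si_dma_vm_write_pages(rdev, ib, pe, addr, count, incr, flags);
	} else {
		/* physically contiguous VRAM can use the PTE/PDE packet */
		si_dma_vm_set_pages(rdev, ib, pe, addr, count, incr, flags);
	}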
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
voltage_table->count = max_voltage_steps;
}
+static int si_get_svi2_voltage_table(struct radeon_device *rdev,
+ struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
+ struct atom_voltage_table *voltage_table)
+{
+ u32 i;
+
+ if (voltage_dependency_table == NULL)
+ return -EINVAL;
+
+ voltage_table->mask_low = 0;
+ voltage_table->phase_delay = 0;
+
+ voltage_table->count = voltage_dependency_table->count;
+ for (i = 0; i < voltage_table->count; i++) {
+ voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
+ voltage_table->entries[i].smio_low = 0;
+ }
+
+ return 0;
+}
+
static int si_construct_voltage_tables(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct si_power_info *si_pi = si_get_pi(rdev);
int ret;
- ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
- VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
- if (ret)
- return ret;
+ if (pi->voltage_control) {
+ ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
+ VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
+ if (ret)
+ return ret;
- if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
- si_trim_voltage_table_to_fit_state_table(rdev,
- SISLANDS_MAX_NO_VREG_STEPS,
- &eg_pi->vddc_voltage_table);
+ if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+ si_trim_voltage_table_to_fit_state_table(rdev,
+ SISLANDS_MAX_NO_VREG_STEPS,
+ &eg_pi->vddc_voltage_table);
+ } else if (si_pi->voltage_control_svi2) {
+ ret = si_get_svi2_voltage_table(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &eg_pi->vddc_voltage_table);
+ if (ret)
+ return ret;
+ } else {
+ return -EINVAL;
+ }
if (eg_pi->vddci_control) {
ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
SISLANDS_MAX_NO_VREG_STEPS,
&eg_pi->vddci_voltage_table);
}
+ if (si_pi->vddci_control_svi2) {
+ ret = si_get_svi2_voltage_table(rdev,
+ &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &eg_pi->vddci_voltage_table);
+ if (ret)
+ return ret;
+ }
if (pi->mvdd_control) {
ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
struct si_power_info *si_pi = si_get_pi(rdev);
u8 i;
- if (eg_pi->vddc_voltage_table.count) {
- si_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
- table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
- cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
-
- for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
- if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
- table->maxVDDCIndexInPPTable = i;
- break;
+ if (si_pi->voltage_control_svi2) {
+ si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
+ si_pi->svc_gpio_id);
+ si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
+ si_pi->svd_gpio_id);
+ si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
+ 2);
+ } else {
+ if (eg_pi->vddc_voltage_table.count) {
+ si_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
+ table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+ cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+
+ for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
+ if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
+ table->maxVDDCIndexInPPTable = i;
+ break;
+ }
}
}
- }
- if (eg_pi->vddci_voltage_table.count) {
- si_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);
+ if (eg_pi->vddci_voltage_table.count) {
+ si_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);
- table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
- cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
- }
+ table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
+ cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+ }
- if (si_pi->mvdd_voltage_table.count) {
- si_populate_smc_voltage_table(rdev, &si_pi->mvdd_voltage_table, table);
+ if (si_pi->mvdd_voltage_table.count) {
+ si_populate_smc_voltage_table(rdev, &si_pi->mvdd_voltage_table, table);
- table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
- cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
- }
+ table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
+ cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
+ }
- if (si_pi->vddc_phase_shed_control) {
- if (si_validate_phase_shedding_tables(rdev, &si_pi->vddc_phase_shed_table,
- &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
- si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
+ if (si_pi->vddc_phase_shed_control) {
+ if (si_validate_phase_shedding_tables(rdev, &si_pi->vddc_phase_shed_table,
+ &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
+ si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
- table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
- cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
+ table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+ cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
- si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
- (u32)si_pi->vddc_phase_shed_table.phase_delay);
- } else {
- si_pi->vddc_phase_shed_control = false;
+ si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
+ (u32)si_pi->vddc_phase_shed_table.phase_delay);
+ } else {
+ si_pi->vddc_phase_shed_control = false;
+ }
}
}
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct si_power_info *si_pi = si_get_pi(rdev);
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
int ret;
if (si_is_smc_running(rdev))
return -EINVAL;
- if (pi->voltage_control)
+ if (pi->voltage_control || si_pi->voltage_control_svi2)
si_enable_voltage_control(rdev, true);
if (pi->mvdd_control)
si_get_mvdd_configuration(rdev);
- if (pi->voltage_control) {
+ if (pi->voltage_control || si_pi->voltage_control_svi2) {
ret = si_construct_voltage_tables(rdev);
if (ret) {
DRM_ERROR("si_construct_voltage_tables failed\n");
ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
pi->voltage_control =
- radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_GPIO_LUT);
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ VOLTAGE_OBJ_GPIO_LUT);
+ if (!pi->voltage_control) {
+ si_pi->voltage_control_svi2 =
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ VOLTAGE_OBJ_SVID2);
+ if (si_pi->voltage_control_svi2)
+ radeon_atom_get_svi2_info(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
+ }
pi->mvdd_control =
- radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, VOLTAGE_OBJ_GPIO_LUT);
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
+ VOLTAGE_OBJ_GPIO_LUT);
eg_pi->vddci_control =
- radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, VOLTAGE_OBJ_GPIO_LUT);
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+ VOLTAGE_OBJ_GPIO_LUT);
+ if (!eg_pi->vddci_control)
+ si_pi->vddci_control_svi2 =
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+ VOLTAGE_OBJ_SVID2);
si_pi->vddc_phase_shed_control =
- radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT);
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ VOLTAGE_OBJ_PHASE_LUT);
rv770_get_engine_memory_ss(rdev);
bool vddc_phase_shed_control;
bool pspp_notify_required;
bool sclk_deep_sleep_above_low;
+ bool voltage_control_svi2;
+ bool vddci_control_svi2;
/* smc offsets */
u32 sram_end;
u32 state_table_start;
SMC_SIslands_MCRegisters smc_mc_reg_table;
SISLANDS_SMC_STATETABLE smc_statetable;
PP_SIslands_PAPMParameters papm_parm;
+ /* SVI2 */
+ u8 svd_gpio_id;
+ u8 svc_gpio_id;
};
#define SISLANDS_INITIAL_STATE_ARB_INDEX 0
if (!rdev->smc_fw)
return -EINVAL;
- switch (rdev->family) {
- case CHIP_TAHITI:
- ucode_start_address = TAHITI_SMC_UCODE_START;
- ucode_size = TAHITI_SMC_UCODE_SIZE;
- break;
- case CHIP_PITCAIRN:
- ucode_start_address = PITCAIRN_SMC_UCODE_START;
- ucode_size = PITCAIRN_SMC_UCODE_SIZE;
- break;
- case CHIP_VERDE:
- ucode_start_address = VERDE_SMC_UCODE_START;
- ucode_size = VERDE_SMC_UCODE_SIZE;
- break;
- case CHIP_OLAND:
- ucode_start_address = OLAND_SMC_UCODE_START;
- ucode_size = OLAND_SMC_UCODE_SIZE;
- break;
- case CHIP_HAINAN:
- ucode_start_address = HAINAN_SMC_UCODE_START;
- ucode_size = HAINAN_SMC_UCODE_SIZE;
- break;
- default:
- DRM_ERROR("unknown asic in smc ucode loader\n");
- BUG();
+ if (rdev->new_fw) {
+ const struct smc_firmware_header_v1_0 *hdr =
+ (const struct smc_firmware_header_v1_0 *)rdev->smc_fw->data;
+
+ radeon_ucode_print_smc_hdr(&hdr->header);
+
+ ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+ src = (const u8 *)
+ (rdev->smc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ } else {
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ ucode_start_address = TAHITI_SMC_UCODE_START;
+ ucode_size = TAHITI_SMC_UCODE_SIZE;
+ break;
+ case CHIP_PITCAIRN:
+ ucode_start_address = PITCAIRN_SMC_UCODE_START;
+ ucode_size = PITCAIRN_SMC_UCODE_SIZE;
+ break;
+ case CHIP_VERDE:
+ ucode_start_address = VERDE_SMC_UCODE_START;
+ ucode_size = VERDE_SMC_UCODE_SIZE;
+ break;
+ case CHIP_OLAND:
+ ucode_start_address = OLAND_SMC_UCODE_START;
+ ucode_size = OLAND_SMC_UCODE_SIZE;
+ break;
+ case CHIP_HAINAN:
+ ucode_start_address = HAINAN_SMC_UCODE_START;
+ ucode_size = HAINAN_SMC_UCODE_SIZE;
+ break;
+ default:
+ DRM_ERROR("unknown asic in smc ucode loader\n");
+ BUG();
+ }
+ src = (const u8 *)rdev->smc_fw->data;
}
if (ucode_size & 3)
return -EINVAL;
- src = (const u8 *)rdev->smc_fw->data;
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(SMC_IND_INDEX_0, ucode_start_address);
WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width 0xF4
#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen 0xFC
#define SI_SMC_SOFT_REGISTER_vr_hot_gpio 0x100
+#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type 0x118
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120
#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
uint64_t vram_visible;
};
-#define RADEON_GEM_NO_BACKING_STORE 1
+#define RADEON_GEM_NO_BACKING_STORE (1 << 0)
+#define RADEON_GEM_GTT_UC (1 << 1)
+#define RADEON_GEM_GTT_WC (1 << 2)
struct drm_radeon_gem_create {
uint64_t size;
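For illustration, a userspace caller could request a write-combined GTT buffer with the new flag. This sketch assumes libdrm's drmIoctl() and the remaining drm_radeon_gem_create fields (alignment, initial_domain, flags), which are not shown in the hunk above:

	/* illustrative only: create a 64 KiB write-combined buffer in GTT */
	struct drm_radeon_gem_create args = {
		.size = 64 * 1024,
		.alignment = 4096,
		.initial_domain = RADEON_GEM_DOMAIN_GTT,
		.flags = RADEON_GEM_GTT_WC,
	};

	if (drmIoctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &args) == 0) {
		/* args.handle now names the new buffer object */
	}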