#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
+#include "dc/dc_edid_parser.h"
+#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"
#include "vid.h"
#include "ivsrcid/ivsrcid_vislands30.h"
+#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
-#include <drm/drm_hdcp.h>
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
+#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
* DOC: overview
*
* The AMDgpu display manager, **amdgpu_dm** (or even simpler,
- * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
+ * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
* requests into DC requests, and DC responses into DRM responses.
*
* The root control structure is &struct amdgpu_display_manager.
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
+static bool is_freesync_video_mode(const struct drm_display_mode *mode,
+ struct amdgpu_dm_connector *aconnector);
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+static bool
+is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state);
/*
* dm_vblank_get_counter
*
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
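+/*
+ * Returns true when DC needs to reprogram vmin/vmax for the stream: either a
+ * fixed refresh rate was requested, or the VRR active state has changed
+ * between the old and new CRTC states.
+ */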
+static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
+ struct dm_crtc_state *new_state)
+{
+ if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
+ return true;
+ else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
+ return true;
+ else
+ return false;
+}
+
/**
* dm_pflip_high_irq() - Handle pageflip interrupt
* @interrupt_params: ignored
/* IRQ could occur when in initial stage */
/* TODO work and BO cleanup */
if (amdgpu_crtc == NULL) {
- DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
+ DC_LOG_PFLIP("CRTC is null, returning.\n");
return;
}
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
- DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
+ DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED,
amdgpu_crtc->crtc_id,
amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
- DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
- amdgpu_crtc->crtc_id, amdgpu_crtc,
- vrr_active, (int) !e);
+ DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
+ amdgpu_crtc->crtc_id, amdgpu_crtc,
+ vrr_active, (int) !e);
}
static void dm_vupdate_high_irq(void *interrupt_params)
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_crtc *acrtc;
+ struct drm_device *drm_dev;
+ struct drm_vblank_crtc *vblank;
+ ktime_t frame_duration_ns, previous_timestamp;
unsigned long flags;
int vrr_active;
if (acrtc) {
vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
+ drm_dev = acrtc->base.dev;
+ vblank = &drm_dev->vblank[acrtc->base.index];
+ previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
+ frame_duration_ns = vblank->time - previous_timestamp;
+
+ if (frame_duration_ns > 0) {
+ trace_amdgpu_refresh_rate_track(acrtc->base.index,
+ frame_duration_ns,
+ ktime_divns(NSEC_PER_SEC, frame_duration_ns));
+ atomic64_set(&irq_params->previous_timestamp, vblank->time);
+ }
- DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
+ DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
acrtc->crtc_id,
vrr_active);
vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
- DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
+ DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
vrr_active, acrtc->dm_irq_params.active_planes);
/**
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+/**
+ * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
+ * DCN generation ASICs
+ * @interrupt_params: interrupt parameters
+ *
+ * Used to set crc window/read out crc value at vertical line 0 position
+ */
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_crtc *acrtc;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
+
+ if (!acrtc)
+ return;
+
+ amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
+}
+#endif
+
+/**
+ * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
+ * @interrupt_params: used for determining the Outbox instance
+ *
+ * Handles the Outbox interrupt event from the DMUB firmware.
+ */
+#define DMUB_TRACE_MAX_READ 64
+static void dm_dmub_outbox1_low_irq(void *interrupt_params)
+{
+ struct dmub_notification notify;
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dmcub_trace_buf_entry entry = { 0 };
+ uint32_t count = 0;
+
+ if (dc_enable_dmub_notifications(adev->dm.dc)) {
+ if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
+ do {
+ dc_stat_get_dmub_notification(adev->dm.dc, &notify);
+ } while (notify.pending_notification);
+
+ if (adev->dm.dmub_notify)
+ memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
+ if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
+ complete(&adev->dm.dmub_aux_transfer_done);
+ // TODO: HPD implementation
+
+ } else {
+ DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
+ }
+ }
+
+ do {
+ if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
+ trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
+ entry.param0, entry.param1);
+
+ DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
+ entry.trace_code, entry.tick_count, entry.param0, entry.param1);
+ } else
+ break;
+
+ count++;
+
+ } while (count <= DMUB_TRACE_MAX_READ);
+
+ ASSERT(count <= DMUB_TRACE_MAX_READ);
+}
+#endif
+
static int dm_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
if (vblank_work->enable)
dm->active_vblank_irq_count++;
- else
+ else if (dm->active_vblank_irq_count)
dm->active_vblank_irq_count--;
+ dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
- dc_allow_idle_optimizations(
- dm->dc, dm->active_vblank_irq_count == 0 ? true : false);
-
- DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
-
+ DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
mutex_unlock(&dm->dc_lock);
}
init_data.flags.power_down_display_on_boot = true;
+ INIT_LIST_HEAD(&adev->dm.da_list);
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
dc_init_callbacks(adev->dm.dc, &init_params);
}
#endif
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
+#endif
+ if (dc_enable_dmub_notifications(adev->dm.dc)) {
+ init_completion(&adev->dm.dmub_aux_transfer_done);
+ adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
+ if (!adev->dm.dmub_notify) {
+ DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
+ goto error;
+ }
+ amdgpu_dm_outbox_init(adev);
+ }
+
if (amdgpu_dm_initialize_drm_device(adev)) {
DRM_ERROR(
"amdgpu: failed to initialize sw for display support.\n");
amdgpu_dm_destroy_drm_device(&adev->dm);
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (adev->dm.crc_rd_wrk) {
+ flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
+ kfree(adev->dm.crc_rd_wrk);
+ adev->dm.crc_rd_wrk = NULL;
+ }
+#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.hdcp_workqueue) {
hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
if (adev->dm.dc)
dc_deinit_callbacks(adev->dm.dc);
#endif
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (adev->dm.vblank_workqueue) {
+ adev->dm.vblank_workqueue->dm = NULL;
+ kfree(adev->dm.vblank_workqueue);
+ adev->dm.vblank_workqueue = NULL;
+ }
+#endif
+
if (adev->dm.dc->ctx->dmub_srv) {
dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
adev->dm.dc->ctx->dmub_srv = NULL;
}
+ if (dc_enable_dmub_notifications(adev->dm.dc)) {
+ kfree(adev->dm.dmub_notify);
+ adev->dm.dmub_notify = NULL;
+ }
+
if (adev->dm.dmub_bo)
amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
&adev->dm.dmub_bo_gpu_addr,
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_BEIGE_GOBY:
case CHIP_VANGOGH:
return 0;
case CHIP_NAVI12:
dmub_asic = DMUB_ASIC_DCN302;
fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
break;
+ case CHIP_BEIGE_GOBY:
+ dmub_asic = DMUB_ASIC_DCN303;
+ fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
+ break;
default:
/* ASIC doesn't support DMUB. */
if (acrtc && state->stream_status[i].plane_count != 0) {
irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
- DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
- acrtc->crtc_id, enable ? "en" : "dis", rc);
+ DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
+ acrtc->crtc_id, enable ? "en" : "dis", rc);
if (rc)
DRM_WARN("Failed to %s pflip interrupts\n",
enable ? "enable" : "disable");
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct amdgpu_device *adev = drm_to_adev(dev);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
#endif
+ if (adev->dm.disable_hpd_irq)
+ return;
+
/*
* In case of failure or MST no need to update connector status or notify the OS
* since (for MST case) MST does this in its own context.
memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
+ if (adev->dm.disable_hpd_irq)
+ return;
+
/*
* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
* conflict, after implement i2c helper, this mutex should be
* retired.
*/
- if (dc_link->type != dc_connection_mst_branch)
- mutex_lock(&aconnector->hpd_lock);
+ mutex_lock(&aconnector->hpd_lock);
read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
}
}
- mutex_lock(&adev->dm.dc_lock);
+ if (!amdgpu_in_reset(adev)) {
+ mutex_lock(&adev->dm.dc_lock);
#ifdef CONFIG_DRM_AMD_DC_HDCP
result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
#else
result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
#endif
- mutex_unlock(&adev->dm.dc_lock);
+ mutex_unlock(&adev->dm.dc_lock);
+ }
out:
if (result && !is_mst_root_connector) {
}
#endif
- if (dc_link->type != dc_connection_mst_branch) {
+ if (dc_link->type != dc_connection_mst_branch)
drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
- mutex_unlock(&aconnector->hpd_lock);
- }
+
+ mutex_unlock(&aconnector->hpd_lock);
}
static void register_hpd_handlers(struct amdgpu_device *adev)
struct dc_interrupt_params int_params = {0};
int r;
int i;
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ static const unsigned int vrtl_int_srcid[] = {
+ DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
+ };
+#endif
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
adev, &int_params, dm_crtc_high_irq, c_irq_params);
}
+ /* Use otg vertical line interrupt */
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
+ vrtl_int_srcid[i], &adev->vline0_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add vline0 irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
+
+ if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
+ DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
+ break;
+ }
+
+ c_irq_params = &adev->dm.vline0_params[int_params.irq_source
+ - DC_IRQ_SOURCE_DC1_VLINE0];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
+ }
+#endif
+
/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
* the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
* to trigger at end of each vblank, regardless of state of the lock,
return 0;
}
+/* Register Outbox IRQ sources and initialize IRQ callbacks */
+static int register_outbox_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r, i;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
+ &adev->dmub_outbox_irq);
+ if (r) {
+ DRM_ERROR("Failed to add outbox irq id!\n");
+ return r;
+ }
+
+ if (dc->ctx->dmub_srv) {
+ i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.dmub_outbox_params[0];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_dmub_outbox1_low_irq, c_irq_params);
+ }
+
+ return 0;
+}
#endif
/*
max - min);
}
-static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
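+/* Apply the requested brightness level to every registered eDP backlight link. */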
+static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+ u32 user_brightness)
{
- struct amdgpu_display_manager *dm = bl_get_data(bd);
struct amdgpu_dm_backlight_caps caps;
- struct dc_link *link = NULL;
- u32 brightness;
+ struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
+ u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
bool rc;
+ int i;
amdgpu_dm_update_backlight_caps(dm);
caps = dm->backlight_caps;
- link = (struct dc_link *)dm->backlight_link;
+ for (i = 0; i < dm->num_of_edps; i++) {
+ dm->brightness[i] = user_brightness;
+ brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
+ link[i] = (struct dc_link *)dm->backlight_link[i];
+ }
- brightness = convert_brightness_from_user(&caps, bd->props.brightness);
- // Change brightness based on AUX property
- if (caps.aux_support)
- rc = dc_link_set_backlight_level_nits(link, true, brightness,
- AUX_BL_DEFAULT_TRANSITION_TIME_MS);
- else
- rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
+ /* Change brightness based on AUX property */
+ if (caps.aux_support) {
+ for (i = 0; i < dm->num_of_edps; i++) {
+ rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
+ AUX_BL_DEFAULT_TRANSITION_TIME_MS);
+ if (!rc) {
+ DRM_ERROR("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
+ break;
+ }
+ }
+ } else {
+ for (i = 0; i < dm->num_of_edps; i++) {
+ rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
+ if (!rc) {
+ DRM_ERROR("DM: Failed to update backlight on eDP[%d]\n", i);
+ break;
+ }
+ }
+ }
return rc ? 0 : 1;
}
-static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
+
+ amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
+
+ return 0;
+}
+
+static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
+{
struct amdgpu_dm_backlight_caps caps;
amdgpu_dm_update_backlight_caps(dm);
caps = dm->backlight_caps;
if (caps.aux_support) {
- struct dc_link *link = (struct dc_link *)dm->backlight_link;
+ struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
u32 avg, peak;
bool rc;
rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
if (!rc)
- return bd->props.brightness;
+ return dm->brightness[0];
return convert_brightness_to_user(&caps, avg);
} else {
- int ret = dc_link_get_backlight_level(dm->backlight_link);
+ int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
if (ret == DC_ERROR_UNEXPECTED)
- return bd->props.brightness;
+ return dm->brightness[0];
return convert_brightness_to_user(&caps, ret);
}
}
+static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+{
+ struct amdgpu_display_manager *dm = bl_get_data(bd);
+
+ return amdgpu_dm_backlight_get_level(dm);
+}
+
static const struct backlight_ops amdgpu_dm_backlight_ops = {
.options = BL_CORE_SUSPENDRESUME,
.get_brightness = amdgpu_dm_backlight_get_brightness,
{
char bl_name[16];
struct backlight_properties props = { 0 };
+ int i;
amdgpu_dm_update_backlight_caps(dm);
+ for (i = 0; i < dm->num_of_edps; i++)
+ dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
props.max_brightness = AMDGPU_MAX_BL_LEVEL;
props.brightness = AMDGPU_MAX_BL_LEVEL;
* DM initialization because not having a backlight control
* is better than a black screen.
*/
- amdgpu_dm_register_backlight_device(dm);
+ if (!dm->backlight_dev)
+ amdgpu_dm_register_backlight_device(dm);
- if (dm->backlight_dev)
- dm->backlight_link = link;
+ if (dm->backlight_dev) {
+ dm->backlight_link[dm->num_of_edps] = link;
+ dm->num_of_edps++;
+ }
}
#endif
}
goto fail;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* Use Outbox interrupt */
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ case CHIP_RENOIR:
+ if (register_outbox_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+ default:
+ DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
+ }
+#endif
+
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_BEIGE_GOBY:
case CHIP_VANGOGH:
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5;
break;
+ case CHIP_BEIGE_GOBY:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 2;
+ adev->mode_info.num_dig = 2;
+ break;
#endif
default:
DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
scaling_info->src_rect.x = state->src_x >> 16;
scaling_info->src_rect.y = state->src_y >> 16;
+ /*
+ * For reasons we don't (yet) fully understand a non-zero
+ * src_y coordinate into an NV12 buffer can cause a
+ * system hang. To avoid hangs (and perhaps err on the side of caution),
+ * let's reject both non-zero src_x and src_y.
+ *
+ * We currently know of only one use-case to reproduce a
+ * scenario with non-zero src_x and src_y for NV12, which
+ * is to gesture the YouTube Android app into full screen
+ * on ChromeOS.
+ */
+ if (state->fb &&
+ state->fb->format->format == DRM_FORMAT_NV12 &&
+ (scaling_info->src_rect.x != 0 ||
+ scaling_info->src_rect.y != 0))
+ return -EINVAL;
+
scaling_info->src_rect.width = state->src_w >> 16;
if (scaling_info->src_rect.width == 0)
return -EINVAL;
if (adev->asic_type == CHIP_SIENNA_CICHLID ||
adev->asic_type == CHIP_NAVY_FLOUNDER ||
adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
+ adev->asic_type == CHIP_BEIGE_GOBY ||
adev->asic_type == CHIP_VANGOGH)
tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}
{
struct amdgpu_device *adev = drm_to_adev(plane->dev);
const struct drm_format_info *info = drm_format_info(format);
+ int i;
enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
return false;
/*
- * We always have to allow this modifier, because core DRM still
- * checks LINEAR support if userspace does not provide modifers.
+ * We always have to allow these modifiers:
+ * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
+ * 2. Not passing any modifiers is the same as explicitly passing INVALID.
*/
- if (modifier == DRM_FORMAT_MOD_LINEAR)
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == DRM_FORMAT_MOD_INVALID) {
return true;
+ }
+
+ /* Check that the modifier is on the list of the plane's supported modifiers. */
+ for (i = 0; i < plane->modifier_count; i++) {
+ if (modifier == plane->modifiers[i])
+ break;
+ }
+ if (i == plane->modifier_count)
+ return false;
/*
* For D swizzle the canonical modifier depends on the bpp, so check
const struct drm_framebuffer *fb = plane_state->fb;
const struct amdgpu_framebuffer *afb =
to_amdgpu_framebuffer(plane_state->fb);
- struct drm_format_name_buf format_name;
int ret;
memset(plane_info, 0, sizeof(*plane_info));
break;
default:
DRM_ERROR(
- "Unsupported screen format %s\n",
- drm_get_format_name(fb->format->format, &format_name));
+ "Unsupported screen format %p4cc\n",
+ &fb->format->format);
return -EINVAL;
}
stream->src = src;
stream->dst = dst;
- DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
- dst.x, dst.y, dst.width, dst.height);
+ DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
+ dst.x, dst.y, dst.width, dst.height);
}
timing_out->hdmi_vic = hv_frame.vic;
}
- timing_out->h_addressable = mode_in->crtc_hdisplay;
- timing_out->h_total = mode_in->crtc_htotal;
- timing_out->h_sync_width =
- mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
- timing_out->h_front_porch =
- mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
- timing_out->v_total = mode_in->crtc_vtotal;
- timing_out->v_addressable = mode_in->crtc_vdisplay;
- timing_out->v_front_porch =
- mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
- timing_out->v_sync_width =
- mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
- timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
+ if (is_freesync_video_mode(mode_in, aconnector)) {
+ timing_out->h_addressable = mode_in->hdisplay;
+ timing_out->h_total = mode_in->htotal;
+ timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
+ timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
+ timing_out->v_total = mode_in->vtotal;
+ timing_out->v_addressable = mode_in->vdisplay;
+ timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
+ timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
+ timing_out->pix_clk_100hz = mode_in->clock * 10;
+ } else {
+ timing_out->h_addressable = mode_in->crtc_hdisplay;
+ timing_out->h_total = mode_in->crtc_htotal;
+ timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
+ timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
+ timing_out->v_total = mode_in->crtc_vtotal;
+ timing_out->v_addressable = mode_in->crtc_vdisplay;
+ timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
+ timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
+ timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
+ }
+
timing_out->aspect_ratio = get_aspect_ratio(mode_in);
stream->output_color_space = get_output_color_space(timing_out);
static void set_multisync_trigger_params(
struct dc_stream_state *stream)
{
+ struct dc_stream_state *master = NULL;
+
if (stream->triggered_crtc_reset.enabled) {
- stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
- stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
+ master = stream->triggered_crtc_reset.event_source;
+ stream->triggered_crtc_reset.event =
+ master->timing.flags.VSYNC_POSITIVE_POLARITY ?
+ CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
+ stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
}
}
static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
int i = 0;
+ struct dc_stream_state *stream;
if (context->stream_count < 2)
return;
* crtc_sync_master.multi_sync_enabled flag
* For now it's set to false
*/
- set_multisync_trigger_params(context->streams[i]);
}
+
set_master_stream(context->streams, context->stream_count);
+
+ for (i = 0; i < context->stream_count ; i++) {
+ stream = context->streams[i];
+
+ if (!stream)
+ continue;
+
+ set_multisync_trigger_params(stream);
+ }
+}
+
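+/*
+ * Return the mode with the highest refresh rate at the preferred mode's
+ * resolution, caching the result in aconnector->freesync_vid_base.
+ */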
+static struct drm_display_mode *
+get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
+ bool use_probed_modes)
+{
+ struct drm_display_mode *m, *m_pref = NULL;
+ u16 current_refresh, highest_refresh;
+ struct list_head *list_head = use_probed_modes ?
+ &aconnector->base.probed_modes :
+ &aconnector->base.modes;
+
+ if (aconnector->freesync_vid_base.clock != 0)
+ return &aconnector->freesync_vid_base;
+
+ /* Find the preferred mode */
+ list_for_each_entry (m, list_head, head) {
+ if (m->type & DRM_MODE_TYPE_PREFERRED) {
+ m_pref = m;
+ break;
+ }
+ }
+
+ if (!m_pref) {
+ /* Probably an EDID with no preferred mode. Fallback to first entry */
+ m_pref = list_first_entry_or_null(
+ &aconnector->base.modes, struct drm_display_mode, head);
+ if (!m_pref) {
+ DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
+ return NULL;
+ }
+ }
+
+ highest_refresh = drm_mode_vrefresh(m_pref);
+
+ /*
+ * Find the mode with highest refresh rate with same resolution.
+ * For some monitors, preferred mode is not the mode with highest
+ * supported refresh rate.
+ */
+ list_for_each_entry (m, list_head, head) {
+ current_refresh = drm_mode_vrefresh(m);
+
+ if (m->hdisplay == m_pref->hdisplay &&
+ m->vdisplay == m_pref->vdisplay &&
+ highest_refresh < current_refresh) {
+ highest_refresh = current_refresh;
+ m_pref = m;
+ }
+ }
+
+ aconnector->freesync_vid_base = *m_pref;
+ return m_pref;
+}
+
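+/*
+ * A mode is a "freesync video mode" when it matches the cached base mode in
+ * everything except the vertical blanking, i.e. it only differs by a vertical
+ * front porch adjustment.
+ */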
+static bool is_freesync_video_mode(const struct drm_display_mode *mode,
+ struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_display_mode *high_mode;
+ int timing_diff;
+
+ high_mode = get_highest_refresh_rate_mode(aconnector, false);
+ if (!high_mode || !mode)
+ return false;
+
+ timing_diff = high_mode->vtotal - mode->vtotal;
+
+ if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
+ high_mode->hdisplay != mode->hdisplay ||
+ high_mode->vdisplay != mode->vdisplay ||
+ high_mode->hsync_start != mode->hsync_start ||
+ high_mode->hsync_end != mode->hsync_end ||
+ high_mode->htotal != mode->htotal ||
+ high_mode->hskew != mode->hskew ||
+ high_mode->vscan != mode->vscan ||
+ high_mode->vsync_start - mode->vsync_start != timing_diff ||
+ high_mode->vsync_end - mode->vsync_end != timing_diff)
+ return false;
+ else
+ return true;
}
static struct dc_stream_state *
dm_state ? &dm_state->base : NULL;
struct dc_stream_state *stream = NULL;
struct drm_display_mode mode = *drm_mode;
+ struct drm_display_mode saved_mode;
+ struct drm_display_mode *freesync_mode = NULL;
bool native_mode_found = false;
- bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+ bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
int mode_refresh;
int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
uint32_t link_bandwidth_kbps;
#endif
struct dc_sink *sink = NULL;
+
+ memset(&saved_mode, 0, sizeof(saved_mode));
+
if (aconnector == NULL) {
DRM_ERROR("aconnector is NULL!\n");
return stream;
*/
DRM_DEBUG_DRIVER("No preferred mode found\n");
} else {
- decide_crtc_timing_for_drm_display_mode(
+ recalculate_timing |= amdgpu_freesync_vid_mode &&
+ is_freesync_video_mode(&mode, aconnector);
+ if (recalculate_timing) {
+ freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
+ saved_mode = mode;
+ if (freesync_mode)
+ mode = *freesync_mode;
+ } else {
+ decide_crtc_timing_for_drm_display_mode(
&mode, preferred_mode,
dm_state ? (dm_state->scaling != RMX_OFF) : false);
+ }
+
preferred_refresh = drm_mode_vrefresh(preferred_mode);
}
- if (!dm_state)
+ if (recalculate_timing)
+ drm_mode_set_crtcinfo(&saved_mode, 0);
+ else if (!dm_state)
drm_mode_set_crtcinfo(&mode, 0);
- /*
+ /*
* If scaling is enabled and refresh rate didn't change
* we copy the vic and polarities of the old timings
*/
- if (!scale || mode_refresh != preferred_refresh)
- fill_stream_properties_from_drm_display_mode(stream,
- &mode, &aconnector->base, con_state, NULL, requested_bpc);
+ if (!recalculate_timing || mode_refresh != preferred_refresh)
+ fill_stream_properties_from_drm_display_mode(
+ stream, &mode, &aconnector->base, con_state, NULL,
+ requested_bpc);
else
- fill_stream_properties_from_drm_display_mode(stream,
- &mode, &aconnector->base, con_state, old_stream, requested_bpc);
+ fill_stream_properties_from_drm_display_mode(
+ stream, &mode, &aconnector->base, con_state, old_stream,
+ requested_bpc);
stream->timing.flags.DSC = 0;
state->abm_level = cur->abm_level;
state->vrr_supported = cur->vrr_supported;
state->freesync_config = cur->freesync_config;
- state->crc_src = cur->crc_src;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
-
/* TODO Duplicate dc_stream after objects are stream object is flattened */
return &state->base;
}
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
+{
+ crtc_debugfs_init(crtc);
+
+ return 0;
+}
+#endif
+
static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
enum dc_irq_source irq_source;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
- DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
- acrtc->crtc_id, enable ? "en" : "dis", rc);
+ DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
+ acrtc->crtc_id, enable ? "en" : "dis", rc);
return rc;
}
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ .late_register = amdgpu_dm_crtc_late_register,
+#endif
};
static enum drm_connector_status
} while (stream == NULL && requested_bpc >= 6);
+ if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
+ DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
+
+ aconnector->force_yuv420_output = true;
+ stream = create_validate_stream_for_sink(aconnector, drm_mode,
+ dm_state, old_stream);
+ aconnector->force_yuv420_output = false;
+ }
+
return stream;
}
return 0;
}
-static bool
-is_hdr_metadata_different(const struct drm_connector_state *old_state,
- const struct drm_connector_state *new_state)
-{
- struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
- struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
-
- if (old_blob != new_blob) {
- if (old_blob && new_blob &&
- old_blob->length == new_blob->length)
- return memcmp(old_blob->data, new_blob->data,
- old_blob->length);
-
- return true;
- }
-
- return false;
-}
-
static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
struct drm_atomic_state *state)
if (!crtc)
return 0;
- if (is_hdr_metadata_different(old_con_state, new_con_state)) {
+ if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
struct dc_info_packet hdr_infopacket;
ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
{
struct dc_stream_state *stream = NULL;
struct drm_connector *connector;
- struct drm_connector_state *new_con_state, *old_con_state;
+ struct drm_connector_state *new_con_state;
struct amdgpu_dm_connector *aconnector;
struct dm_connector_state *dm_conn_state;
int i, j, clock, bpp;
int vcpi, pbn_div, pbn = 0;
- for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
aconnector = to_amdgpu_dm_connector(connector);
int r;
if (!new_state->fb) {
- DRM_DEBUG_DRIVER("No FB bound\n");
+ DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
- /* If completely outside of screen, viewport_width and/or viewport_height will be negative,
- * which is still OK to satisfy the condition below, thereby also covering these cases
- * (when plane is completely outside of screen).
- * x2 for width is because of pipe-split.
- */
- if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE)
+ if (viewport_width < 0 || viewport_height < 0) {
+ DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
+ return -EINVAL;
+ } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
+ DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
return -EINVAL;
+ } else if (viewport_height < MIN_VIEWPORT_SIZE) {
+ DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
+ return -EINVAL;
+ }
+
}
/* Get min/max allowed scaling factors from plane caps. */
}
static int dm_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct amdgpu_device *adev = drm_to_adev(plane->dev);
struct dc *dc = adev->dm.dc;
struct dm_plane_state *dm_plane_state;
struct drm_crtc_state *new_crtc_state;
int ret;
- trace_amdgpu_dm_plane_atomic_check(state);
+ trace_amdgpu_dm_plane_atomic_check(new_plane_state);
- dm_plane_state = to_dm_plane_state(state);
+ dm_plane_state = to_dm_plane_state(new_plane_state);
if (!dm_plane_state->dc_state)
return 0;
new_crtc_state =
- drm_atomic_get_new_crtc_state(state->state, state->crtc);
+ drm_atomic_get_new_crtc_state(state,
+ new_plane_state->crtc);
if (!new_crtc_state)
return -EINVAL;
- ret = dm_plane_helper_check_state(state, new_crtc_state);
+ ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
if (ret)
return ret;
- ret = fill_dc_scaling_info(state, &scaling_info);
+ ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
if (ret)
return ret;
}
static int dm_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_plane_state *new_plane_state)
+ struct drm_atomic_state *state)
{
/* Only support async updates on cursor planes. */
if (plane->type != DRM_PLANE_TYPE_CURSOR)
}
static void dm_plane_atomic_async_update(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_plane_state *old_state =
- drm_atomic_get_old_plane_state(new_state->state, plane);
+ drm_atomic_get_old_plane_state(state, plane);
trace_amdgpu_dm_atomic_update_cursor(new_state);
*/
drm_mode_sort(&connector->probed_modes);
amdgpu_dm_get_native_mode(connector);
+
+ /* Freesync capabilities are reset by calling
+ * drm_add_edid_modes() and need to be
+ * restored here.
+ */
+ amdgpu_dm_update_freesync_caps(connector, edid);
} else {
amdgpu_dm_connector->num_modes = 0;
}
}
+static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
+ struct drm_display_mode *mode)
+{
+ struct drm_display_mode *m;
+
+ list_for_each_entry (m, &aconnector->base.probed_modes, head) {
+ if (drm_mode_equal(m, mode))
+ return true;
+ }
+
+ return false;
+}
+
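+/*
+ * Synthesize extra modes at common video refresh rates by stretching the
+ * vertical front porch of the highest-refresh base mode, and add them to the
+ * connector's probed modes.
+ */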
+static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
+{
+ const struct drm_display_mode *m;
+ struct drm_display_mode *new_mode;
+ uint i;
+ uint32_t new_modes_count = 0;
+
+ /* Standard FPS values
+ *
+ * 23.976 - TV/NTSC
+ * 24 - Cinema
+ * 25 - TV/PAL
+ * 29.97 - TV/NTSC
+ * 30 - TV/NTSC
+ * 48 - Cinema HFR
+ * 50 - TV/PAL
+ * 60 - Commonly used
+ * 48,72,96 - Multiples of 24
+ */
+ const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
+ 48000, 50000, 60000, 72000, 96000 };
+
+ /*
+ * Find mode with highest refresh rate with the same resolution
+ * as the preferred mode. Some monitors report a preferred mode
+ * with lower resolution than the highest refresh rate supported.
+ */
+
+ m = get_highest_refresh_rate_mode(aconnector, true);
+ if (!m)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
+ uint64_t target_vtotal, target_vtotal_diff;
+ uint64_t num, den;
+
+ if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
+ continue;
+
+ if (common_rates[i] < aconnector->min_vfreq * 1000 ||
+ common_rates[i] > aconnector->max_vfreq * 1000)
+ continue;
+
+ num = (unsigned long long)m->clock * 1000 * 1000;
+ den = common_rates[i] * (unsigned long long)m->htotal;
+ target_vtotal = div_u64(num, den);
+ target_vtotal_diff = target_vtotal - m->vtotal;
+
+ /* Check for illegal modes */
+ if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
+ m->vsync_end + target_vtotal_diff < m->vsync_start ||
+ m->vtotal + target_vtotal_diff < m->vsync_end)
+ continue;
+
+ new_mode = drm_mode_duplicate(aconnector->base.dev, m);
+ if (!new_mode)
+ goto out;
+
+ new_mode->vtotal += (u16)target_vtotal_diff;
+ new_mode->vsync_start += (u16)target_vtotal_diff;
+ new_mode->vsync_end += (u16)target_vtotal_diff;
+ new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+ new_mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ if (!is_duplicate_mode(aconnector, new_mode)) {
+ drm_mode_probed_add(&aconnector->base, new_mode);
+ new_modes_count += 1;
+ } else
+ drm_mode_destroy(aconnector->base.dev, new_mode);
+ }
+ out:
+ return new_modes_count;
+}
+
+static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ if (!(amdgpu_freesync_vid_mode && edid))
+ return;
+
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ amdgpu_dm_connector->num_modes +=
+ add_fs_modes(amdgpu_dm_connector);
+}
+
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector =
} else {
amdgpu_dm_connector_ddc_get_modes(connector, edid);
amdgpu_dm_connector_add_common_modes(encoder, connector);
+ amdgpu_dm_connector_add_freesync_modes(connector, edid);
}
amdgpu_dm_fbc_init(connector);
if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector_type == DRM_MODE_CONNECTOR_eDP) {
- drm_object_attach_property(
- &aconnector->base.base,
- dm->ddev->mode_config.hdr_output_metadata_property, 0);
+ drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
if (!aconnector->mst_port)
drm_connector_attach_vrr_capable_property(&aconnector->base);
adev,
&adev->pageflip_irq,
irq_type);
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ amdgpu_irq_get(
+ adev,
+ &adev->vline0_irq,
+ irq_type);
+#endif
} else {
-
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ amdgpu_irq_put(
+ adev,
+ &adev->vline0_irq,
+ irq_type);
+#endif
amdgpu_irq_put(
adev,
&adev->pageflip_irq,
int x, y;
int xorigin = 0, yorigin = 0;
- position->enable = false;
- position->x = 0;
- position->y = 0;
-
if (!crtc || !plane->state->fb)
return 0;
struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
uint64_t address = afb ? afb->address : 0;
- struct dc_cursor_position position;
+ struct dc_cursor_position position = {0};
struct dc_cursor_attributes attributes;
int ret;
if (!plane->state->fb && !old_plane_state->fb)
return;
- DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
- __func__,
- amdgpu_crtc->crtc_id,
- plane->state->crtc_w,
- plane->state->crtc_h);
+ DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
+ __func__,
+ amdgpu_crtc->crtc_id,
+ plane->state->crtc_w,
+ plane->state->crtc_h);
ret = get_cursor_position(plane, crtc, &position);
if (ret)
/* Mark this event as consumed */
acrtc->base.state->event = NULL;
- DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
- acrtc->crtc_id);
+ DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
+ acrtc->crtc_id);
}
static void update_freesync_state_on_stream(
struct amdgpu_device *adev = dm->adev;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
unsigned long flags;
+ bool pack_sdp_v1_3 = false;
if (!new_stream)
return;
&vrr_params,
PACKET_TYPE_VRR,
TRANSFER_FUNC_UNKNOWN,
- &vrr_infopacket);
+ &vrr_infopacket,
+ pack_sdp_v1_3);
new_crtc_state->freesync_timing_changed |=
(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
if (new_crtc_state->vrr_supported &&
config.min_refresh_in_uhz &&
config.max_refresh_in_uhz) {
- config.state = new_crtc_state->base.vrr_enabled ?
- VRR_STATE_ACTIVE_VARIABLE :
- VRR_STATE_INACTIVE;
+ /*
+ * if freesync compatible mode was set, config.state will be set
+ * in atomic check
+ */
+ if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
+ (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
+ new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
+ vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
+ vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
+ vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
+ vrr_params.state = VRR_STATE_ACTIVE_FIXED;
+ } else {
+ config.state = new_crtc_state->base.vrr_enabled ?
+ VRR_STATE_ACTIVE_VARIABLE :
+ VRR_STATE_INACTIVE;
+ }
} else {
config.state = VRR_STATE_UNSUPPORTED;
}
static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
struct drm_plane *plane;
- struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_plane_state *old_plane_state;
int i;
/*
* TODO: Make this per-stream so we don't issue redundant updates for
* commits with multiple streams.
*/
- for_each_oldnew_plane_in_state(state, plane, old_plane_state,
- new_plane_state, i)
+ for_each_old_plane_in_state(state, plane, old_plane_state, i)
if (plane->type == DRM_PLANE_TYPE_CURSOR)
handle_cursor_update(plane, old_plane_state);
}
&bundle->flip_addrs[planes_count].address,
afb->tmz_surface, false);
- DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
+ DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
new_plane_state->plane->index,
bundle->plane_infos[planes_count].dcc.enable);
dc_plane,
bundle->flip_addrs[planes_count].flip_timestamp_in_us);
- DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
+ DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
__func__,
bundle->flip_addrs[planes_count].address.grph.addr.high_part,
bundle->flip_addrs[planes_count].address.grph.addr.low_part);
* re-adjust the min/max bounds now that DC doesn't handle this
* as part of commit.
*/
- if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
- amdgpu_dm_vrr_active(acrtc_state)) {
+ if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
dc_stream_adjust_vmin_vmax(
dm->dc, acrtc_state->stream,
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
- DRM_DEBUG_DRIVER(
+ DRM_DEBUG_ATOMIC(
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
"planes_changed:%d, mode_changed:%d,active_changed:%d,"
"connectors_changed:%d\n",
if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
- DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
+ DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
if (!dm_new_crtc_state->stream) {
/*
crtc->hwmode = new_crtc_state->mode;
mode_set_reset_required = true;
} else if (modereset_required(new_crtc_state)) {
- DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
+ DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
/* i.e. reset mode */
if (dm_old_crtc_state->stream)
remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+
mode_set_reset_required = true;
}
} /* for_each_crtc_in_state() */
dm_enable_per_frame_crtc_master_sync(dc_state);
mutex_lock(&dm->dc_lock);
WARN_ON(!dc_commit_state(dm->dc, dc_state));
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* Allow idle optimization when vblank count is 0 for display off */
+ if (dm->active_vblank_irq_count == 0)
+ dc_allow_idle_optimizations(dm->dc, true);
+#endif
mutex_unlock(&dm->dc_lock);
}
hdcp_update_display(
adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
new_con_state->hdcp_content_type,
- new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
- : false);
+ new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
}
#endif
dm_old_crtc_state->abm_level;
hdr_changed =
- is_hdr_metadata_different(old_con_state, new_con_state);
+ !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
if (!scaling_changed && !abm_changed && !hdr_changed)
continue;
*/
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
-
+#ifdef CONFIG_DEBUG_FS
+ bool configure_crc = false;
+ enum amdgpu_dm_pipe_crc_source cur_crc_src;
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
+#endif
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ cur_crc_src = acrtc->dm_irq_params.crc_src;
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+#endif
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (new_crtc_state->active &&
*/
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
- amdgpu_dm_crtc_configure_crc_source(
- crtc, dm_new_crtc_state,
- dm_new_crtc_state->crc_src);
+ if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
+ configure_crc = true;
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (amdgpu_dm_crc_window_is_activated(crtc)) {
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ acrtc->dm_irq_params.crc_window.update_win = true;
+ acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
+ spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
+ crc_rd_wrk->crtc = crtc;
+ spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+ }
+#endif
}
+
+ if (configure_crc)
+ if (amdgpu_dm_crtc_configure_crc_source(
+ crtc, dm_new_crtc_state, cur_crc_src))
+ DRM_DEBUG_DRIVER("Failed to configure crc source");
#endif
}
}
/* Update audio instances for each connector. */
amdgpu_dm_commit_audio(dev, state);
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+ /* restore the backlight level */
+ if (dm->backlight_dev)
+ amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
+#endif
/*
* send vblank event on all events not handled in flip and
* mark consumed event for drm_atomic_helper_commit_hw_done
to_amdgpu_dm_connector(new_con_state->base.connector);
struct drm_display_mode *mode = &new_crtc_state->base.mode;
int vrefresh = drm_mode_vrefresh(mode);
+ bool fs_vid_mode = false;
new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
vrefresh >= aconnector->min_vfreq &&
if (new_crtc_state->vrr_supported) {
new_crtc_state->stream->ignore_msa_timing_param = true;
- config.state = new_crtc_state->base.vrr_enabled ?
- VRR_STATE_ACTIVE_VARIABLE :
- VRR_STATE_INACTIVE;
- config.min_refresh_in_uhz =
- aconnector->min_vfreq * 1000000;
- config.max_refresh_in_uhz =
- aconnector->max_vfreq * 1000000;
+ fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
+
+ config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
+ config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
config.vsif_supported = true;
config.btr = true;
- }
+ if (fs_vid_mode) {
+ config.state = VRR_STATE_ACTIVE_FIXED;
+ config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
+ goto out;
+ } else if (new_crtc_state->base.vrr_enabled) {
+ config.state = VRR_STATE_ACTIVE_VARIABLE;
+ } else {
+ config.state = VRR_STATE_INACTIVE;
+ }
+ }
+out:
new_crtc_state->freesync_config = config;
}
sizeof(new_crtc_state->vrr_infopacket));
}
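+/*
+ * Returns true when the new CRTC timing differs from the old one only by a
+ * vertical front porch change, which freesync can absorb without a full
+ * modeset.
+ */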
+static bool
+is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state)
+{
+ struct drm_display_mode old_mode, new_mode;
+
+ if (!old_crtc_state || !new_crtc_state)
+ return false;
+
+ old_mode = old_crtc_state->mode;
+ new_mode = new_crtc_state->mode;
+
+ if (old_mode.clock == new_mode.clock &&
+ old_mode.hdisplay == new_mode.hdisplay &&
+ old_mode.vdisplay == new_mode.vdisplay &&
+ old_mode.htotal == new_mode.htotal &&
+ old_mode.vtotal != new_mode.vtotal &&
+ old_mode.hsync_start == new_mode.hsync_start &&
+ old_mode.vsync_start != new_mode.vsync_start &&
+ old_mode.hsync_end == new_mode.hsync_end &&
+ old_mode.vsync_end != new_mode.vsync_end &&
+ old_mode.hskew == new_mode.hskew &&
+ old_mode.vscan == new_mode.vscan &&
+ (old_mode.vsync_end - old_mode.vsync_start) ==
+ (new_mode.vsync_end - new_mode.vsync_start))
+ return true;
+
+ return false;
+}
+
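+/*
+ * Switch the CRTC to fixed-rate freesync and derive the fixed refresh rate
+ * in uHz from the mode's pixel clock and total timing.
+ */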
+static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
+ uint64_t num, den, res;
+ struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
+
+ dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
+
+ num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
+ den = (unsigned long long)new_crtc_state->mode.htotal *
+ (unsigned long long)new_crtc_state->mode.vtotal;
+
+ res = div_u64(num, den);
+ dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
+}
+
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
struct drm_atomic_state *state,
struct drm_crtc *crtc,
* TODO: Refactor this function to allow this check to work
* in all conditions.
*/
+ if (amdgpu_freesync_vid_mode &&
+ dm_new_crtc_state->stream &&
+ is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
+ goto skip_modeset;
+
if (dm_new_crtc_state->stream &&
dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
goto skip_modeset;
- DRM_DEBUG_DRIVER(
+ DRM_DEBUG_ATOMIC(
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
"planes_changed:%d, mode_changed:%d,active_changed:%d,"
"connectors_changed:%d\n",
if (!dm_old_crtc_state->stream)
goto skip_modeset;
+ if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
+ is_timing_unchanged_for_freesync(new_crtc_state,
+ old_crtc_state)) {
+ new_crtc_state->mode_changed = false;
+ DRM_DEBUG_DRIVER(
+ "Mode change not required for front porch change, "
+ "setting mode_changed to %d",
+ new_crtc_state->mode_changed);
+
+ set_freesync_fixed_config(dm_new_crtc_state);
+
+ goto skip_modeset;
+ } else if (amdgpu_freesync_vid_mode && aconnector &&
+ is_freesync_video_mode(&new_crtc_state->mode,
+ aconnector)) {
+ set_freesync_fixed_config(dm_new_crtc_state);
+ }
+
ret = dm_atomic_get_state(state, &dm_state);
if (ret)
goto fail;
dc_stream_retain(new_stream);
- DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
+ crtc->base.id);
if (dc_add_stream_to_ctx(
dm->dc,
if (!dc_new_plane_state)
return -ENOMEM;
- DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
- plane->base.id, new_plane_crtc->base.id);
+ DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, new_plane_crtc->base.id);
ret = fill_dc_plane_attributes(
drm_to_adev(new_plane_crtc->dev),
new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
- if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
+ if (!new_cursor_state || !new_primary_state ||
+ !new_cursor_state->fb || !new_primary_state->fb) {
return 0;
}
}
#endif
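+/*
+ * Overlay planes do not mix well with the hardware cursor unless they fully
+ * cover the primary plane, so reject configurations where an enabled overlay
+ * only partially covers the primary plane of its CRTC.
+ */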
+static int validate_overlay(struct drm_atomic_state *state)
+{
+ int i;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_plane_state *primary_state, *overlay_state = NULL;
+
+ /* Check if primary plane is contained inside overlay */
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
+ if (drm_atomic_plane_disabling(plane->state, new_plane_state))
+ return 0;
+
+ overlay_state = new_plane_state;
+ continue;
+ }
+ }
+
+ /* check if we're making changes to the overlay plane */
+ if (!overlay_state)
+ return 0;
+
+ /* check if overlay plane is enabled */
+ if (!overlay_state->crtc)
+ return 0;
+
+ /* find the primary plane for the CRTC that the overlay is enabled on */
+ primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
+ if (IS_ERR(primary_state))
+ return PTR_ERR(primary_state);
+
+ /* check if primary plane is enabled */
+ if (!primary_state->crtc)
+ return 0;
+
+ /* Perform the bounds check to ensure the overlay plane covers the primary */
+ if (primary_state->crtc_x < overlay_state->crtc_x ||
+ primary_state->crtc_y < overlay_state->crtc_y ||
+ primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
+ primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
+ DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
* @dev: The DRM device
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (adev->asic_type >= CHIP_NAVI10) {
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
ret = add_affected_mst_dsc_crtcs(state, crtc);
goto fail;
}
+ ret = validate_overlay(state);
+ if (ret)
+ goto fail;
+
/* Add new/modified planes */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
ret = dm_update_plane_state(dc, state, plane,
return capable;
}
+
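+/*
+ * Stream a CEA extension block to the DMCU/DMUB parser 8 bytes at a time and
+ * retrieve the AMD vendor-specific data block (FreeSync range) if the
+ * firmware reports one.
+ */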
+static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
+ uint8_t *edid_ext, int len,
+ struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ int i;
+ struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
+ struct dc *dc = adev->dm.dc;
+
+ /* send extension block to DMCU for parsing */
+ for (i = 0; i < len; i += 8) {
+ bool res;
+ int offset;
+
+ /* send 8 bytes at a time */
+ if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
+ return false;
+
+ if (i + 8 == len) {
+ /* EDID block sent completed, expect result */
+ int version, min_rate, max_rate;
+
+ res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
+ if (res) {
+ /* amd vsdb found */
+ vsdb_info->freesync_supported = 1;
+ vsdb_info->amd_vsdb_version = version;
+ vsdb_info->min_refresh_rate_hz = min_rate;
+ vsdb_info->max_refresh_rate_hz = max_rate;
+ return true;
+ }
+ /* not amd vsdb */
+ return false;
+ }
+
+ /* check for ack */
+ res = dc_edid_parser_recv_cea_ack(dc, &offset);
+ if (!res)
+ return false;
+ }
+
+ return false;
+}
+
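+/*
+ * Locate the CEA extension in the EDID and hand it to parse_edid_cea() to
+ * look for the AMD VSDB. Returns the extension index on success, -ENODEV
+ * otherwise.
+ */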
+static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
+ struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ uint8_t *edid_ext = NULL;
+ int i;
+ bool valid_vsdb_found = false;
+
+ /*----- drm_find_cea_extension() -----*/
+ /* No EDID or EDID extensions */
+ if (edid == NULL || edid->extensions == 0)
+ return -ENODEV;
+
+ /* Find CEA extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
+ if (edid_ext[0] == CEA_EXT)
+ break;
+ }
+
+ if (i == edid->extensions)
+ return -ENODEV;
+
+ /*----- cea_db_offsets() -----*/
+ if (edid_ext[0] != CEA_EXT)
+ return -ENODEV;
+
+ valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
+
+ return valid_vsdb_found ? i : -ENODEV;
+}
+
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct edid *edid)
{
- int i;
- bool edid_check_required;
+ int i = 0;
struct detailed_timing *timing;
struct detailed_non_pixel *data;
struct detailed_data_monitor_range *range;
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
bool freesync_capable = false;
+ struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
if (!connector->state) {
DRM_ERROR("%s - Connector has no state", __func__);
dm_con_state = to_dm_connector_state(connector->state);
- edid_check_required = false;
if (!amdgpu_dm_connector->dc_sink) {
DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
goto update;
}
if (!adev->dm.freesync_module)
goto update;
- /*
- * if edid non zero restrict freesync only for dp and edp
- */
- if (edid) {
- if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
- || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
+
+ if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
+ || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ bool edid_check_required = false;
+
+ if (edid) {
edid_check_required = is_dp_capable_without_timing_msa(
adev->dm.dc,
amdgpu_dm_connector);
}
- }
- if (edid_check_required == true && (edid->version > 1 ||
- (edid->version == 1 && edid->revision > 1))) {
- for (i = 0; i < 4; i++) {
- timing = &edid->detailed_timings[i];
- data = &timing->data.other_data;
- range = &data->data.range;
- /*
- * Check if monitor has continuous frequency mode
- */
- if (data->type != EDID_DETAIL_MONITOR_RANGE)
- continue;
- /*
- * Check for flag range limits only. If flag == 1 then
- * no additional timing information provided.
- * Default GTF, GTF Secondary curve and CVT are not
- * supported
- */
- if (range->flags != 1)
- continue;
+ if (edid_check_required && (edid->version > 1 ||
+ (edid->version == 1 && edid->revision > 1))) {
+ for (i = 0; i < 4; i++) {
+
+ timing = &edid->detailed_timings[i];
+ data = &timing->data.other_data;
+ range = &data->data.range;
+ /*
+ * Check if monitor has continuous frequency mode
+ */
+ if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ continue;
+ /*
+ * Check for flag range limits only. If flag == 1 then
+ * no additional timing information provided.
+ * Default GTF, GTF Secondary curve and CVT are not
+ * supported
+ */
+ if (range->flags != 1)
+ continue;
- amdgpu_dm_connector->min_vfreq = range->min_vfreq;
- amdgpu_dm_connector->max_vfreq = range->max_vfreq;
- amdgpu_dm_connector->pixel_clock_mhz =
- range->pixel_clock_mhz * 10;
+ amdgpu_dm_connector->min_vfreq = range->min_vfreq;
+ amdgpu_dm_connector->max_vfreq = range->max_vfreq;
+ amdgpu_dm_connector->pixel_clock_mhz =
+ range->pixel_clock_mhz * 10;
- connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
- connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
+ connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
+ connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
- break;
- }
+ break;
+ }
- if (amdgpu_dm_connector->max_vfreq -
- amdgpu_dm_connector->min_vfreq > 10) {
+ if (amdgpu_dm_connector->max_vfreq -
+ amdgpu_dm_connector->min_vfreq > 10) {
- freesync_capable = true;
+ freesync_capable = true;
+ }
+ }
+ } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
+ if (i >= 0 && vsdb_info.freesync_supported) {
+ timing = &edid->detailed_timings[i];
+ data = &timing->data.other_data;
+
+ amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
+ amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ freesync_capable = true;
+
+ connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
+ connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
}
}
return value;
}
+
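+/*
+ * Kick off a DMUB-based AUX transfer and wait (up to 10 seconds) for the
+ * outbox notification carrying the reply. Returns the reply length, or -1 on
+ * timeout.
+ */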
+int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
+ struct aux_payload *payload, enum aux_return_code_type *operation_result)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ int ret = 0;
+
+ dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
+ ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
+ if (ret == 0) {
+ *operation_result = AUX_RET_ERROR_TIMEOUT;
+ return -1;
+ }
+ *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
+
+ if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
+ (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
+
+ // For read case, copy data to payload
+ if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
+ (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
+ memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
+ adev->dm.dmub_notify->aux_reply.length);
+ }
+
+ return adev->dm.dmub_notify->aux_reply.length;
+}