Merge tag 'amd-drm-fixes-5.12-2021-03-10' of https://gitlab.freedesktop.org/agd5f...
author Dave Airlie <airlied@redhat.com>
Fri, 12 Mar 2021 01:20:02 +0000 (11:20 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 12 Mar 2021 01:20:02 +0000 (11:20 +1000)
amd-drm-fixes-5.12-2021-03-10:

amdgpu:
- Fix aux backlight control
- Add a backlight override parameter
- Various display fixes
- PCIe DPM fix for vega
- Polaris watermark fixes
- Additional S0ix fix

radeon:
- Fix GEM regression
- Fix AGP dependency handling

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210310221141.3974-1-alexander.deucher@amd.com
26 files changed:
drivers/gpu/drm/Kconfig
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_prime.c

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e392a90..85b79a7 100644
@@ -228,6 +228,7 @@ source "drivers/gpu/drm/arm/Kconfig"
 config DRM_RADEON
        tristate "ATI Radeon"
        depends on DRM && PCI && MMU
+       depends on AGP || !AGP
        select FW_LOADER
         select DRM_KMS_HELPER
         select DRM_TTM
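
The added "depends on AGP || !AGP" is the standard Kconfig idiom for an optional dependency: it evaluates to y when AGP is built-in or disabled, but to m when AGP=m, which caps DRM_RADEON at m as well and keeps a built-in radeon from referencing symbols in a modular agpgart.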
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b6879d9..49267eb 100644
@@ -180,6 +180,7 @@ extern uint amdgpu_smu_memory_pool_size;
 extern uint amdgpu_dc_feature_mask;
 extern uint amdgpu_dc_debug_mask;
 extern uint amdgpu_dm_abm_level;
+extern int amdgpu_backlight;
 extern struct amdgpu_mgpu_info mgpu_info;
 extern int amdgpu_ras_enable;
 extern uint amdgpu_ras_mask;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 36a741d..2e9b16f 100644
@@ -903,7 +903,7 @@ void amdgpu_acpi_fini(struct amdgpu_device *adev)
  */
 bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
 {
-#if defined(CONFIG_AMD_PMC)
+#if defined(CONFIG_AMD_PMC) || defined(CONFIG_AMD_PMC_MODULE)
        if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
                if (adev->flags & AMD_IS_APU)
                        return true;
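
Checking CONFIG_AMD_PMC_MODULE as well makes the S0ix detection work when the AMD PMC driver is built as a module (Kconfig defines CONFIG_<SYMBOL>_MODULE for =m options). The combined test is equivalent to the shorter form below, shown only for reference:

    #if IS_ENABLED(CONFIG_AMD_PMC)   /* true for both =y and =m */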
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 4575192..b26e2fd 100644
@@ -781,6 +781,10 @@ uint amdgpu_dm_abm_level;
 MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) ");
 module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);
 
+int amdgpu_backlight = -1;
+MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (default))");
+module_param_named(backlight, amdgpu_backlight, bint, 0444);
+
 /**
  * DOC: tmz (int)
  * Trusted Memory Zone (TMZ) is a method to protect data being written
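
Pieced together from the hunk above and the amdgpu_dm.c hunk further down, the new override is a simple tri-state (a condensed sketch for illustration, not an additional change in this pull):

    int amdgpu_backlight = -1;                      /* -1 = auto, 0 = pwm, 1 = aux */
    module_param_named(backlight, amdgpu_backlight, bint, 0444);

    /* consumed while parsing the connector's DPCD ext caps: */
    if (amdgpu_backlight == 0)
            caps->aux_support = false;              /* force the PWM path */
    else if (amdgpu_backlight == 1)
            caps->aux_support = true;               /* force DPCD/AUX control */

In practice, booting with amdgpu.backlight=0 forces PWM brightness control and amdgpu.backlight=1 forces AUX control; the default of -1 keeps the automatic detection.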
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 51cd49c..24010ca 100644
@@ -146,7 +146,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
        size = mode_cmd->pitches[0] * height;
        aligned_size = ALIGN(size, PAGE_SIZE);
        ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags,
-                                      ttm_bo_type_kernel, NULL, &gobj);
+                                      ttm_bo_type_device, NULL, &gobj);
        if (ret) {
                pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
                return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3e1fd1e..573cf17 100644
@@ -2267,6 +2267,11 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
            caps->ext_caps->bits.hdr_aux_backlight_control == 1)
                caps->aux_support = true;
 
+       if (amdgpu_backlight == 0)
+               caps->aux_support = false;
+       else if (amdgpu_backlight == 1)
+               caps->aux_support = true;
+
        /* From the specification (CTA-861-G), for calculating the maximum
         * luminance we need to use:
         *      Luminance = 50*2**(CV/32)
@@ -3185,19 +3190,6 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
 #endif
 }
 
-static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
-{
-       bool rc;
-
-       if (!link)
-               return 1;
-
-       rc = dc_link_set_backlight_level_nits(link, true, brightness,
-                                             AUX_BL_DEFAULT_TRANSITION_TIME_MS);
-
-       return rc ? 0 : 1;
-}
-
 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
                                unsigned *min, unsigned *max)
 {
@@ -3260,9 +3252,10 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
        brightness = convert_brightness_from_user(&caps, bd->props.brightness);
        // Change brightness based on AUX property
        if (caps.aux_support)
-               return set_backlight_via_aux(link, brightness);
-
-       rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
+               rc = dc_link_set_backlight_level_nits(link, true, brightness,
+                                                     AUX_BL_DEFAULT_TRANSITION_TIME_MS);
+       else
+               rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
 
        return rc ? 0 : 1;
 }
@@ -3270,11 +3263,27 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
 {
        struct amdgpu_display_manager *dm = bl_get_data(bd);
-       int ret = dc_link_get_backlight_level(dm->backlight_link);
+       struct amdgpu_dm_backlight_caps caps;
+
+       amdgpu_dm_update_backlight_caps(dm);
+       caps = dm->backlight_caps;
+
+       if (caps.aux_support) {
+               struct dc_link *link = (struct dc_link *)dm->backlight_link;
+               u32 avg, peak;
+               bool rc;
 
-       if (ret == DC_ERROR_UNEXPECTED)
-               return bd->props.brightness;
-       return convert_brightness_to_user(&dm->backlight_caps, ret);
+               rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
+               if (!rc)
+                       return bd->props.brightness;
+               return convert_brightness_to_user(&caps, avg);
+       } else {
+               int ret = dc_link_get_backlight_level(dm->backlight_link);
+
+               if (ret == DC_ERROR_UNEXPECTED)
+                       return bd->props.brightness;
+               return convert_brightness_to_user(&caps, ret);
+       }
 }
 
 static const struct backlight_ops amdgpu_dm_backlight_ops = {
@@ -4716,6 +4725,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
        dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
        dc_plane_state->dcc = plane_info.dcc;
        dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
+       dc_plane_state->flip_int_enabled = true;
 
        /*
         * Always set input transfer function, since plane state is refreshed
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index fa5059f..bd01010 100644
@@ -2602,7 +2602,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
                        if (pipe_ctx->plane_state == NULL)
                                frame_ramp = 0;
                } else {
-                       ASSERT(false);
                        return false;
                }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 4eee3a5..18ed0d3 100644
@@ -887,6 +887,7 @@ struct dc_plane_state {
        int layer_index;
 
        union surface_update_flags update_flags;
+       bool flip_int_enabled;
        /* private to DC core */
        struct dc_plane_status status;
        struct dc_context *ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 9e796df..714c71a 100644
@@ -1257,6 +1257,16 @@ void hubp1_soft_reset(struct hubp *hubp, bool reset)
        REG_UPDATE(DCHUBP_CNTL, HUBP_DISABLE, reset ? 1 : 0);
 }
 
+void hubp1_set_flip_int(struct hubp *hubp)
+{
+       struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+       REG_UPDATE(DCSURF_SURFACE_FLIP_INTERRUPT,
+               SURFACE_FLIP_INT_MASK, 1);
+
+       return;
+}
+
 void hubp1_init(struct hubp *hubp)
 {
        //do nothing
@@ -1290,6 +1300,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
        .dmdata_load = NULL,
        .hubp_soft_reset = hubp1_soft_reset,
        .hubp_in_blank = hubp1_in_blank,
+       .hubp_set_flip_int = hubp1_set_flip_int,
 };
 
 /*****************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index a9a6ed7..e2f2f69 100644
@@ -74,6 +74,7 @@
        SRI(DCSURF_SURFACE_EARLIEST_INUSE_C, HUBPREQ, id),\
        SRI(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, HUBPREQ, id),\
        SRI(DCSURF_SURFACE_CONTROL, HUBPREQ, id),\
+       SRI(DCSURF_SURFACE_FLIP_INTERRUPT, HUBPREQ, id),\
        SRI(HUBPRET_CONTROL, HUBPRET, id),\
        SRI(DCN_EXPANSION_MODE, HUBPREQ, id),\
        SRI(DCHUBP_REQ_SIZE_CONFIG, HUBP, id),\
        uint32_t DCSURF_SURFACE_EARLIEST_INUSE_C; \
        uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C; \
        uint32_t DCSURF_SURFACE_CONTROL; \
+       uint32_t DCSURF_SURFACE_FLIP_INTERRUPT; \
        uint32_t HUBPRET_CONTROL; \
        uint32_t DCN_EXPANSION_MODE; \
        uint32_t DCHUBP_REQ_SIZE_CONFIG; \
        HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ_C, mask_sh),\
        HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\
        HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\
+       HUBP_SF(HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK, mask_sh),\
        HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
        HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
        HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
        type PRIMARY_SURFACE_DCC_IND_64B_BLK;\
        type SECONDARY_SURFACE_DCC_EN;\
        type SECONDARY_SURFACE_DCC_IND_64B_BLK;\
+       type SURFACE_FLIP_INT_MASK;\
        type DET_BUF_PLANE1_BASE_ADDRESS;\
        type CROSSBAR_SRC_CB_B;\
        type CROSSBAR_SRC_CR_R;\
@@ -777,4 +781,6 @@ void hubp1_read_state_common(struct hubp *hubp);
 bool hubp1_in_blank(struct hubp *hubp);
 void hubp1_soft_reset(struct hubp *hubp, bool reset);
 
+void hubp1_set_flip_int(struct hubp *hubp);
+
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 89912bb..9ba5c62 100644
@@ -2196,6 +2196,13 @@ static void dcn10_enable_plane(
        if (dc->debug.sanity_checks) {
                hws->funcs.verify_allow_pstate_change_high(dc);
        }
+
+       if (!pipe_ctx->top_pipe
+               && pipe_ctx->plane_state
+               && pipe_ctx->plane_state->flip_int_enabled
+               && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
+                       pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
+
 }
 
 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 0df0da2..bec7059 100644
@@ -1597,6 +1597,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {
        .validate_dml_output = hubp2_validate_dml_output,
        .hubp_in_blank = hubp1_in_blank,
        .hubp_soft_reset = hubp1_soft_reset,
+       .hubp_set_flip_int = hubp1_set_flip_int,
 };
 
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 0726fb4..5342c30 100644
@@ -1146,6 +1146,12 @@ void dcn20_enable_plane(
                pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
        }
 
+       if (!pipe_ctx->top_pipe
+               && pipe_ctx->plane_state
+               && pipe_ctx->plane_state->flip_int_enabled
+               && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
+                       pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
+
 //     if (dc->debug.sanity_checks) {
 //             dcn10_verify_allow_pstate_change_high(dc);
 //     }
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index f904585..b0c9180 100644
@@ -838,6 +838,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
        .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
        .hubp_init = hubp21_init,
        .validate_dml_output = hubp21_validate_dml_output,
+       .hubp_set_flip_int = hubp1_set_flip_int,
 };
 
 bool hubp21_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 072f8c8..173488a 100644
@@ -296,7 +296,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
        .num_banks = 8,
        .num_chans = 4,
        .vmm_page_size_bytes = 4096,
-       .dram_clock_change_latency_us = 11.72,
+       .dram_clock_change_latency_us = 23.84,
        .return_bus_width_bytes = 64,
        .dispclk_dppclk_vco_speed_mhz = 3600,
        .xfc_bus_transport_time_us = 4,
@@ -1062,8 +1062,6 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
 {
        int i;
 
-       DC_FP_START();
-
        if (dc->bb_overrides.sr_exit_time_ns) {
                for (i = 0; i < WM_SET_COUNT; i++) {
                          dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us =
@@ -1088,8 +1086,6 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
                                dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
                }
        }
-
-       DC_FP_END();
 }
 
 void dcn21_calculate_wm(
@@ -1339,7 +1335,7 @@ static noinline bool dcn21_validate_bandwidth_fp(struct dc *dc,
        int vlevel = 0;
        int pipe_split_from[MAX_PIPES];
        int pipe_cnt = 0;
-       display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+       display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
        DC_LOGGER_INIT(dc->ctx->logger);
 
        BW_VAL_TRACE_COUNT();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index 88ffa9f..f246125 100644
@@ -511,6 +511,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
        .hubp_init = hubp3_init,
        .hubp_in_blank = hubp1_in_blank,
        .hubp_soft_reset = hubp1_soft_reset,
+       .hubp_set_flip_int = hubp1_set_flip_int,
 };
 
 bool hubp3_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 8d0f663..fb7f1de 100644
@@ -2508,6 +2508,19 @@ static const struct resource_funcs dcn30_res_pool_funcs = {
        .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
 };
 
+#define CTX ctx
+
+#define REG(reg_name) \
+       (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+
+static uint32_t read_pipe_fuses(struct dc_context *ctx)
+{
+       uint32_t value = REG_READ(CC_DC_PIPE_DIS);
+       /* Support for max 6 pipes */
+       value = value & 0x3f;
+       return value;
+}
+
 static bool dcn30_resource_construct(
        uint8_t num_virtual_links,
        struct dc *dc,
@@ -2517,6 +2530,15 @@ static bool dcn30_resource_construct(
        struct dc_context *ctx = dc->ctx;
        struct irq_service_init_data init_data;
        struct ddc_service_init_data ddc_init_data;
+       uint32_t pipe_fuses = read_pipe_fuses(ctx);
+       uint32_t num_pipes = 0;
+
+       if (!(pipe_fuses == 0 || pipe_fuses == 0x3e)) {
+               BREAK_TO_DEBUGGER();
+               dm_error("DC: Unexpected fuse recipe for navi2x !\n");
+               /* fault to single pipe */
+               pipe_fuses = 0x3e;
+       }
 
        DC_FP_START();
 
@@ -2650,6 +2672,15 @@ static bool dcn30_resource_construct(
        /* PP Lib and SMU interfaces */
        init_soc_bounding_box(dc, pool);
 
+       num_pipes = dcn3_0_ip.max_num_dpp;
+
+       for (i = 0; i < dcn3_0_ip.max_num_dpp; i++)
+               if (pipe_fuses & 1 << i)
+                       num_pipes--;
+
+       dcn3_0_ip.max_num_dpp = num_pipes;
+       dcn3_0_ip.max_num_otg = num_pipes;
+
        dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
 
        /* IRQ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 5d4b2c6..c494235 100644
@@ -1619,12 +1619,106 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
        dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
 }
 
+static void calculate_wm_set_for_vlevel(
+               int vlevel,
+               struct wm_range_table_entry *table_entry,
+               struct dcn_watermarks *wm_set,
+               struct display_mode_lib *dml,
+               display_e2e_pipe_params_st *pipes,
+               int pipe_cnt)
+{
+       double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
+
+       ASSERT(vlevel < dml->soc.num_states);
+       /* only pipe 0 is read for voltage and dcf/soc clocks */
+       pipes[0].clks_cfg.voltage = vlevel;
+       pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
+       pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
+
+       dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
+       dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
+       dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
+
+       wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
+       wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
+       wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
+       wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
+       wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
+       wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
+       wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
+       wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
+       dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
+
+}
+
+static void dcn301_calculate_wm_and_dlg(
+               struct dc *dc, struct dc_state *context,
+               display_e2e_pipe_params_st *pipes,
+               int pipe_cnt,
+               int vlevel_req)
+{
+       int i, pipe_idx;
+       int vlevel, vlevel_max;
+       struct wm_range_table_entry *table_entry;
+       struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
+
+       ASSERT(bw_params);
+
+       vlevel_max = bw_params->clk_table.num_entries - 1;
+
+       /* WM Set D */
+       table_entry = &bw_params->wm_table.entries[WM_D];
+       if (table_entry->wm_type == WM_TYPE_RETRAINING)
+               vlevel = 0;
+       else
+               vlevel = vlevel_max;
+       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
+                                               &context->bw_ctx.dml, pipes, pipe_cnt);
+       /* WM Set C */
+       table_entry = &bw_params->wm_table.entries[WM_C];
+       vlevel = min(max(vlevel_req, 2), vlevel_max);
+       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
+                                               &context->bw_ctx.dml, pipes, pipe_cnt);
+       /* WM Set B */
+       table_entry = &bw_params->wm_table.entries[WM_B];
+       vlevel = min(max(vlevel_req, 1), vlevel_max);
+       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
+                                               &context->bw_ctx.dml, pipes, pipe_cnt);
+
+       /* WM Set A */
+       table_entry = &bw_params->wm_table.entries[WM_A];
+       vlevel = min(vlevel_req, vlevel_max);
+       calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
+                                               &context->bw_ctx.dml, pipes, pipe_cnt);
+
+       for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+               if (!context->res_ctx.pipe_ctx[i].stream)
+                       continue;
+
+               pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+               pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+               if (dc->config.forced_clocks) {
+                       pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
+                       pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
+               }
+               if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
+                       pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+               if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+                       pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+
+               pipe_idx++;
+       }
+
+       dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+}
+
 static struct resource_funcs dcn301_res_pool_funcs = {
        .destroy = dcn301_destroy_resource_pool,
        .link_enc_create = dcn301_link_encoder_create,
        .panel_cntl_create = dcn301_panel_cntl_create,
        .validate_bandwidth = dcn30_validate_bandwidth,
-       .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
+       .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
        .populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
        .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
        .add_stream_to_ctx = dcn30_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 22f3f64..346dcd8 100644
@@ -191,6 +191,8 @@ struct hubp_funcs {
        bool (*hubp_in_blank)(struct hubp *hubp);
        void (*hubp_soft_reset)(struct hubp *hubp, bool reset);
 
+       void (*hubp_set_flip_int)(struct hubp *hubp);
+
 };
 
 #endif
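
The new hubp_set_flip_int hook is optional, so the callers added above guard it before dispatching; condensed from the dcn10/dcn20 enable_plane hunks:

    if (!pipe_ctx->top_pipe &&
        pipe_ctx->plane_state &&
        pipe_ctx->plane_state->flip_int_enabled &&
        pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
            pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);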
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index c57dc9a..a2681fe 100644
@@ -5216,10 +5216,10 @@ static int smu7_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
                for (j = 0; j < dep_sclk_table->count; j++) {
                        valid_entry = false;
                        for (k = 0; k < watermarks->num_wm_sets; k++) {
-                               if (dep_sclk_table->entries[i].clk / 10 >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz &&
-                                   dep_sclk_table->entries[i].clk / 10 < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz &&
-                                   dep_mclk_table->entries[i].clk / 10 >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz &&
-                                   dep_mclk_table->entries[i].clk / 10 < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz) {
+                               if (dep_sclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz / 10 &&
+                                   dep_sclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz / 10 &&
+                                   dep_mclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz / 10 &&
+                                   dep_mclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz / 10) {
                                        valid_entry = true;
                                        table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k].wm_set_id;
                                        break;
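
The Polaris watermark change moves the "/ 10" from the clock side of each comparison to the range side. This reads as a unit fix, assuming the dependency-table clocks are stored in 10 kHz units while the wm_clk_ranges bounds are in kHz; a rough worked example under that assumption:

    /* 800 MHz sclk (80000 in 10 kHz units) against a 600 MHz floor (600000 kHz):
     *   before:  80000 / 10 = 8000  >= 600000             -> no watermark set matches
     *   after:   80000            >= 600000 / 10 = 60000  -> matches as intended
     */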
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 29c9964..22b636e 100644
@@ -1505,6 +1505,48 @@ static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
        return 0;
 }
 
+static int vega10_override_pcie_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+       struct vega10_hwmgr *data =
+                       (struct vega10_hwmgr *)(hwmgr->backend);
+       uint32_t pcie_gen = 0, pcie_width = 0;
+       PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+       int i;
+
+       if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+               pcie_gen = 3;
+       else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+               pcie_gen = 2;
+       else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
+               pcie_gen = 1;
+       else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
+               pcie_gen = 0;
+
+       if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+               pcie_width = 6;
+       else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+               pcie_width = 5;
+       else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+               pcie_width = 4;
+       else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+               pcie_width = 3;
+       else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+               pcie_width = 2;
+       else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+               pcie_width = 1;
+
+       for (i = 0; i < NUM_LINK_LEVELS; i++) {
+               if (pp_table->PcieGenSpeed[i] > pcie_gen)
+                       pp_table->PcieGenSpeed[i] = pcie_gen;
+
+               if (pp_table->PcieLaneCount[i] > pcie_width)
+                       pp_table->PcieLaneCount[i] = pcie_width;
+       }
+
+       return 0;
+}
+
 static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
 {
        int result = -1;
@@ -2556,6 +2598,11 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
                        "Failed to initialize Link Level!",
                        return result);
 
+       result = vega10_override_pcie_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE(!result,
+                       "Failed to override pcie parameters!",
+                       return result);
+
        result = vega10_populate_all_graphic_levels(hwmgr);
        PP_ASSERT_WITH_CODE(!result,
                        "Failed to initialize Graphics Level!",
@@ -2922,6 +2969,7 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
        return 0;
 }
 
+
 static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool enable)
 {
        struct vega10_hwmgr *data = hwmgr->backend;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index c075302..43e01d8 100644
@@ -481,6 +481,67 @@ static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
        dpm_state->hard_max_level = 0xffff;
 }
 
+static int vega12_override_pcie_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+       struct vega12_hwmgr *data =
+                       (struct vega12_hwmgr *)(hwmgr->backend);
+       uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg;
+       PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+       int i;
+       int ret;
+
+       if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+               pcie_gen = 3;
+       else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+               pcie_gen = 2;
+       else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
+               pcie_gen = 1;
+       else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
+               pcie_gen = 0;
+
+       if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+               pcie_width = 6;
+       else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+               pcie_width = 5;
+       else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+               pcie_width = 4;
+       else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+               pcie_width = 3;
+       else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+               pcie_width = 2;
+       else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+               pcie_width = 1;
+
+       /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
+        * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
+        * Bit 7:0:   PCIE lane width, 1 to 7 corresponds is x1 to x32
+        */
+       for (i = 0; i < NUM_LINK_LEVELS; i++) {
+               pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen :
+                       pp_table->PcieGenSpeed[i];
+               pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? pcie_width :
+                       pp_table->PcieLaneCount[i];
+
+               if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg !=
+                   pp_table->PcieLaneCount[i]) {
+                       smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg;
+                       ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+                               NULL);
+                       PP_ASSERT_WITH_CODE(!ret,
+                               "[OverridePcieParameters] Attempt to override pcie params failed!",
+                               return ret);
+               }
+
+               /* update the pptable */
+               pp_table->PcieGenSpeed[i] = pcie_gen_arg;
+               pp_table->PcieLaneCount[i] = pcie_width_arg;
+       }
+
+       return 0;
+}
+
 static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
                PPCLK_e clk_id, uint32_t *num_of_levels)
 {
@@ -968,6 +1029,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
                        "Failed to enable all smu features!",
                        return result);
 
+       result = vega12_override_pcie_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE(!result,
+                       "[EnableDPMTasks] Failed to override pcie parameters!",
+                       return result);
+
        tmp_result = vega12_power_control_set_level(hwmgr);
        PP_ASSERT_WITH_CODE(!tmp_result,
                        "Failed to power control set level!",
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index 87811b0..f19964c 100644
@@ -831,7 +831,9 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
        struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
        struct vega20_hwmgr *data =
                        (struct vega20_hwmgr *)(hwmgr->backend);
-       uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
+       uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg;
+       PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+       int i;
        int ret;
 
        if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
@@ -860,17 +862,27 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
         * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
         * Bit 7:0:   PCIE lane width, 1 to 7 corresponds is x1 to x32
         */
-       smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
-       ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
-                       NULL);
-       PP_ASSERT_WITH_CODE(!ret,
-               "[OverridePcieParameters] Attempt to override pcie params failed!",
-               return ret);
+       for (i = 0; i < NUM_LINK_LEVELS; i++) {
+               pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen :
+                       pp_table->PcieGenSpeed[i];
+               pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? pcie_width :
+                       pp_table->PcieLaneCount[i];
+
+               if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg !=
+                   pp_table->PcieLaneCount[i]) {
+                       smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg;
+                       ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+                               NULL);
+                       PP_ASSERT_WITH_CODE(!ret,
+                               "[OverridePcieParameters] Attempt to override pcie params failed!",
+                               return ret);
+               }
 
-       data->pcie_parameters_override = true;
-       data->pcie_gen_level1 = pcie_gen;
-       data->pcie_width_level1 = pcie_width;
+               /* update the pptable */
+               pp_table->PcieGenSpeed[i] = pcie_gen_arg;
+               pp_table->PcieLaneCount[i] = pcie_width_arg;
+       }
 
        return 0;
 }
@@ -3319,9 +3331,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                        data->od8_settings.od8_settings_array;
        OverDriveTable_t *od_table =
                        &(data->smc_state_table.overdrive_table);
-       struct phm_ppt_v3_information *pptable_information =
-               (struct phm_ppt_v3_information *)hwmgr->pptable;
-       PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable;
+       PPTable_t *pptable = &(data->smc_state_table.pp_table);
        struct pp_clock_levels_with_latency clocks;
        struct vega20_single_dpm_table *fclk_dpm_table =
                        &(data->dpm_table.fclk_table);
@@ -3420,13 +3430,9 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                current_lane_width =
                        vega20_get_current_pcie_link_width_level(hwmgr);
                for (i = 0; i < NUM_LINK_LEVELS; i++) {
-                       if (i == 1 && data->pcie_parameters_override) {
-                               gen_speed = data->pcie_gen_level1;
-                               lane_width = data->pcie_width_level1;
-                       } else {
-                               gen_speed = pptable->PcieGenSpeed[i];
-                               lane_width = pptable->PcieLaneCount[i];
-                       }
+                       gen_speed = pptable->PcieGenSpeed[i];
+                       lane_width = pptable->PcieLaneCount[i];
+
                        size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
                                        (gen_speed == 0) ? "2.5GT/s," :
                                        (gen_speed == 1) ? "5.0GT/s," :
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index f09989b..3effc8c 100644
@@ -574,6 +574,8 @@ struct radeon_gem {
        struct list_head        objects;
 };
 
+extern const struct drm_gem_object_funcs radeon_gem_object_funcs;
+
 int radeon_gem_init(struct radeon_device *rdev);
 void radeon_gem_fini(struct radeon_device *rdev);
 int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 9418269..db14a82 100644
@@ -43,7 +43,7 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
 int radeon_gem_prime_pin(struct drm_gem_object *obj);
 void radeon_gem_prime_unpin(struct drm_gem_object *obj);
 
-static const struct drm_gem_object_funcs radeon_gem_object_funcs;
+const struct drm_gem_object_funcs radeon_gem_object_funcs;
 
 static void radeon_gem_object_free(struct drm_gem_object *gobj)
 {
@@ -227,7 +227,7 @@ static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
        return r;
 }
 
-static const struct drm_gem_object_funcs radeon_gem_object_funcs = {
+const struct drm_gem_object_funcs radeon_gem_object_funcs = {
        .free = radeon_gem_object_free,
        .open = radeon_gem_object_open,
        .close = radeon_gem_object_close,
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index ab29eb9..42a8794 100644
@@ -56,6 +56,8 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
        if (ret)
                return ERR_PTR(ret);
 
+       bo->tbo.base.funcs = &radeon_gem_object_funcs;
+
        mutex_lock(&rdev->gem.mutex);
        list_add_tail(&bo->list, &rdev->gem.objects);
        mutex_unlock(&rdev->gem.mutex);
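
The radeon_prime.c hunk pairs with the radeon_gem.c change above: exporting radeon_gem_object_funcs lets radeon_gem_prime_import_sg_table install the same GEM callbacks on imported buffers, which is presumably the GEM regression called out in the tag summary (imported BOs otherwise ended up with base.funcs left unset).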