Merge branch 'drm-next' into drm-next-5.3
author Alex Deucher <alexander.deucher@amd.com>
Tue, 25 Jun 2019 13:42:25 +0000 (08:42 -0500)
committer Alex Deucher <alexander.deucher@amd.com>
Tue, 25 Jun 2019 13:42:25 +0000 (08:42 -0500)
Backmerge drm-next and fix up conflicts due to drmP.h removal.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
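
Most of the conflicts follow a single pattern: drm-next deleted the catch-all <drm/drmP.h>, so each conflicting file now has to pull in only the specific DRM headers it actually uses. A minimal sketch of the typical resolution (the exact replacement headers vary per file; the first amdgpu.h hunk below shows a real instance):

    /* before the backmerge: the removed catch-all header */
    #include <drm/drmP.h>
    #include <drm/amdgpu_drm.h>

    /* after: only the headers this file really needs */
    #include <drm/amdgpu_drm.h>
    #include <drm/drm_gem.h>
    #include <drm/drm_ioctl.h>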
84 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/core/dc_surface.c
drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/smu_v11_0.c
drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
drivers/gpu/drm/radeon/radeon_ttm.c
include/drm/drm_dp_helper.h

@@@ -44,9 -44,9 +44,9 @@@
  #include <drm/ttm/ttm_module.h>
  #include <drm/ttm/ttm_execbuf_util.h>
  
- #include <drm/drmP.h>
- #include <drm/drm_gem.h>
  #include <drm/amdgpu_drm.h>
+ #include <drm/drm_gem.h>
+ #include <drm/drm_ioctl.h>
  #include <drm/gpu_scheduler.h>
  
  #include <kgd_kfd_interface.h>
@@@ -84,8 -84,6 +84,8 @@@
  #include "amdgpu_doorbell.h"
  #include "amdgpu_amdkfd.h"
  #include "amdgpu_smu.h"
 +#include "amdgpu_discovery.h"
 +#include "amdgpu_mes.h"
  
  #define MAX_GPU_INSTANCE              16
  
@@@ -144,6 -142,7 +144,6 @@@ extern uint amdgpu_sdma_phase_quantum
  extern char *amdgpu_disable_cu;
  extern char *amdgpu_virtual_display;
  extern uint amdgpu_pp_feature_mask;
 -extern int amdgpu_vram_page_split;
  extern int amdgpu_ngg;
  extern int amdgpu_prim_buf_per_se;
  extern int amdgpu_pos_buf_per_se;
@@@ -156,14 -155,9 +156,14 @@@ extern int amdgpu_gpu_recovery
  extern int amdgpu_emu_mode;
  extern uint amdgpu_smu_memory_pool_size;
  extern uint amdgpu_dc_feature_mask;
 +extern uint amdgpu_dm_abm_level;
  extern struct amdgpu_mgpu_info mgpu_info;
  extern int amdgpu_ras_enable;
  extern uint amdgpu_ras_mask;
 +extern int amdgpu_async_gfx_ring;
 +extern int amdgpu_mcbp;
 +extern int amdgpu_discovery;
 +extern int amdgpu_mes;
  
  #ifdef CONFIG_DRM_AMDGPU_SI
  extern int amdgpu_si_support;
@@@ -219,8 -213,7 +219,8 @@@ struct amdgpu_atif
  struct kfd_vm_fault_info;
  
  enum amdgpu_cp_irq {
 -      AMDGPU_CP_IRQ_GFX_EOP = 0,
 +      AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
 +      AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP,
        AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
        AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
        AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
@@@ -666,8 -659,6 +666,8 @@@ struct amdgpu_nbio_funcs 
        u32 (*get_memsize)(struct amdgpu_device *adev);
        void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
                        bool use_doorbell, int doorbell_index, int doorbell_size);
 +      void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell,
 +                      int doorbell_index);
        void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
                                         bool enable);
        void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
  };
  
  struct amdgpu_df_funcs {
 -      void (*init)(struct amdgpu_device *adev);
 +      void (*sw_init)(struct amdgpu_device *adev);
        void (*enable_broadcast_mode)(struct amdgpu_device *adev,
                                      bool enable);
        u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
@@@ -738,7 -729,6 +738,7 @@@ struct amd_powerplay 
  };
  
  #define AMDGPU_RESET_MAGIC_NUM 64
 +#define AMDGPU_MAX_DF_PERFMONS 4
  struct amdgpu_device {
        struct device                   *dev;
        struct drm_device               *ddev;
        struct amdgpu_debugfs           debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
        unsigned                        debugfs_count;
  #if defined(CONFIG_DEBUG_FS)
 +      struct dentry                   *debugfs_preempt;
        struct dentry                   *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
  #endif
        struct amdgpu_atif              *atif;
        /* display related functionality */
        struct amdgpu_display_manager dm;
  
 +      /* discovery */
 +      uint8_t                         *discovery;
 +
 +      /* mes */
 +      bool                            enable_mes;
 +      struct amdgpu_mes               mes;
 +
        struct amdgpu_ip_block          ip_blocks[AMDGPU_MAX_IP_NUM];
        int                             num_ip_blocks;
        struct mutex    mn_lock;
        long                            compute_timeout;
  
        uint64_t                        unique_id;
 +      uint64_t        df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
  };
  
  static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@@ -1217,19 -1198,4 +1217,19 @@@ static inline int amdgpu_dm_display_res
  #endif
  
  #include "amdgpu_object.h"
 +
 +/* used by df_v3_6.c and amdgpu_pmu.c */
 +#define AMDGPU_PMU_ATTR(_name, _object)                                       \
 +static ssize_t                                                                \
 +_name##_show(struct device *dev,                                      \
 +                             struct device_attribute *attr,           \
 +                             char *page)                              \
 +{                                                                     \
 +      BUILD_BUG_ON(sizeof(_object) >= PAGE_SIZE - 1);                 \
 +      return sprintf(page, _object "\n");                             \
 +}                                                                     \
 +                                                                      \
 +static struct device_attribute pmu_attr_##_name = __ATTR_RO(_name)
 +
  #endif
 +
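
As a reading aid, here is a hypothetical expansion of the new AMDGPU_PMU_ATTR macro; the attribute name "cpumask" and the value string "0" are illustrative only, not taken from this patch:

    /* AMDGPU_PMU_ATTR(cpumask, "0"); expands to roughly: */
    static ssize_t
    cpumask_show(struct device *dev, struct device_attribute *attr, char *page)
    {
            BUILD_BUG_ON(sizeof("0") >= PAGE_SIZE - 1);
            return sprintf(page, "0" "\n");
    }

    static struct device_attribute pmu_attr_cpumask = __ATTR_RO(cpumask);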
@@@ -22,7 -22,7 +22,7 @@@
  
  #include "amdgpu_amdkfd.h"
  #include "amd_shared.h"
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_gfx.h"
  #include "amdgpu_dma_buf.h"
@@@ -78,7 -78,6 +78,7 @@@ void amdgpu_amdkfd_device_probe(struct 
        case CHIP_POLARIS10:
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:
 +      case CHIP_VEGAM:
                kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
                break;
        case CHIP_VEGA10:
@@@ -87,9 -86,6 +87,9 @@@
        case CHIP_RAVEN:
                kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
                break;
 +      case CHIP_NAVI10:
 +              kfd2kgd = amdgpu_amdkfd_gfx_10_0_get_functions();
 +              break;
        default:
                dev_info(adev->dev, "kfd not supported on this ASIC\n");
                return;
@@@ -162,7 -158,7 +162,7 @@@ void amdgpu_amdkfd_device_init(struct a
  
                /* remove the KIQ bit as well */
                if (adev->gfx.kiq.ring.sched.ready)
 -                      clear_bit(amdgpu_gfx_queue_to_bit(adev,
 +                      clear_bit(amdgpu_gfx_mec_queue_to_bit(adev,
                                                          adev->gfx.kiq.ring.me - 1,
                                                          adev->gfx.kiq.ring.pipe,
                                                          adev->gfx.kiq.ring.queue),
@@@ -440,12 -436,9 +440,12 @@@ void amdgpu_amdkfd_get_local_mem_info(s
  
        if (amdgpu_sriov_vf(adev))
                mem_info->mem_clk_max = adev->clock.default_mclk / 100;
 -      else if (adev->powerplay.pp_funcs)
 -              mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
 -      else
 +      else if (adev->powerplay.pp_funcs) {
 +              if (amdgpu_emu_mode == 1)
 +                      mem_info->mem_clk_max = 0;
 +              else
 +                      mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
 +      } else
                mem_info->mem_clk_max = 100;
  }
  
@@@ -708,11 -701,6 +708,11 @@@ struct kfd2kgd_calls *amdgpu_amdkfd_gfx
        return NULL;
  }
  
 +struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void)
 +{
 +      return NULL;
 +}
 +
  struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
                              const struct kfd2kgd_calls *f2g)
  {
  
  #define pr_fmt(fmt) "kfd2kgd: " fmt
  
+ #include <linux/dma-buf.h>
  #include <linux/list.h>
  #include <linux/pagemap.h>
  #include <linux/sched/mm.h>
- #include <linux/dma-buf.h>
- #include <drm/drmP.h>
+ #include <linux/sched/task.h>
  #include "amdgpu_object.h"
  #include "amdgpu_vm.h"
  #include "amdgpu_amdkfd.h"
@@@ -1731,17 -1732,35 +1732,17 @@@ static int update_invalid_user_pages(st
                ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
                                                   bo->tbo.ttm->pages);
                if (ret) {
 -                      bo->tbo.ttm->pages[0] = NULL;
 -                      pr_info("%s: Failed to get user pages: %d\n",
 +                      pr_debug("%s: Failed to get user pages: %d\n",
                                __func__, ret);
 -                      /* Pretend it succeeded. It will fail later
 -                       * with a VM fault if the GPU tries to access
 -                       * it. Better than hanging indefinitely with
 -                       * stalled user mode queues.
 -                       */
 -              }
 -      }
  
 -      return 0;
 -}
 -
 -/* Remove invalid userptr BOs from hmm track list
 - *
 - * Stop HMM track the userptr update
 - */
 -static void untrack_invalid_user_pages(struct amdkfd_process_info *process_info)
 -{
 -      struct kgd_mem *mem, *tmp_mem;
 -      struct amdgpu_bo *bo;
 +                      /* Return error -EBUSY or -ENOMEM, retry restore */
 +                      return ret;
 +              }
  
 -      list_for_each_entry_safe(mem, tmp_mem,
 -                               &process_info->userptr_inval_list,
 -                               validate_list.head) {
 -              bo = mem->bo;
                amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
        }
 +
 +      return 0;
  }
  
  /* Validate invalid userptr BOs
@@@ -1823,6 -1842,13 +1824,6 @@@ static int validate_invalid_user_pages(
                list_move_tail(&mem->validate_list.head,
                               &process_info->userptr_valid_list);
  
 -              /* Stop HMM track the userptr update. We dont check the return
 -               * value for concurrent CPU page table update because we will
 -               * reschedule the restore worker if process_info->evicted_bos
 -               * is updated.
 -               */
 -              amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
 -
                /* Update mapping. If the BO was not validated
                 * (because we couldn't get user pages), this will
                 * clear the page table entries, which will result in
@@@ -1921,6 -1947,7 +1922,6 @@@ static void amdgpu_amdkfd_restore_userp
        }
  
  unlock_out:
 -      untrack_invalid_user_pages(process_info);
        mutex_unlock(&process_info->lock);
        mmput(mm);
        put_task_struct(usertask);
@@@ -2126,16 -2153,12 +2127,16 @@@ int amdgpu_amdkfd_add_gws_to_process(vo
         * Add process eviction fence to bo so they can
         * evict each other.
         */
 +      ret = reservation_object_reserve_shared(gws_bo->tbo.resv, 1);
 +      if (ret)
 +              goto reserve_shared_fail;
        amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
        amdgpu_bo_unreserve(gws_bo);
        mutex_unlock(&(*mem)->process_info->lock);
  
        return ret;
  
 +reserve_shared_fail:
  bo_validation_failure:
        amdgpu_bo_unreserve(gws_bo);
  bo_reservation_failure:
@@@ -20,7 -20,7 +20,7 @@@
   * OTHER DEALINGS IN THE SOFTWARE.
   *
   */
- #include <drm/drmP.h>
  #include <drm/amdgpu_drm.h>
  #include "amdgpu.h"
  #include "atomfirmware.h"
@@@ -118,7 -118,6 +118,7 @@@ union umc_info 
  
  union vram_info {
        struct atom_vram_info_header_v2_3 v23;
 +      struct atom_vram_info_header_v2_4 v24;
  };
  /*
   * Return vram width from integrated system info table, if available,
  int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev)
  {
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
 -      int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
 -                                              integratedsysteminfo);
 +      int index;
        u16 data_offset, size;
        union igp_info *igp_info;
 +      union vram_info *vram_info;
 +      u32 mem_channel_number;
 +      u32 mem_channel_width;
        u8 frev, crev;
  
 +      if (adev->flags & AMD_IS_APU)
 +              index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
 +                                                  integratedsysteminfo);
 +      else
 +              index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
 +                                                  vram_info);
 +
        /* get any igp specific overrides */
        if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
                                   &frev, &crev, &data_offset)) {
 -              igp_info = (union igp_info *)
 -                      (mode_info->atom_context->bios + data_offset);
 -              switch (crev) {
 -              case 11:
 -                      return igp_info->v11.umachannelnumber * 64;
 -              default:
 -                      return 0;
 +              if (adev->flags & AMD_IS_APU) {
 +                      igp_info = (union igp_info *)
 +                              (mode_info->atom_context->bios + data_offset);
 +                      switch (crev) {
 +                      case 11:
 +                              mem_channel_number = igp_info->v11.umachannelnumber;
 +                              /* channel width is 64 */
 +                              return mem_channel_number * 64;
 +                      default:
 +                              return 0;
 +                      }
 +              } else {
 +                      vram_info = (union vram_info *)
 +                              (mode_info->atom_context->bios + data_offset);
 +                      switch (crev) {
 +                      case 3:
 +                              mem_channel_number = vram_info->v23.vram_module[0].channel_num;
 +                              mem_channel_width = vram_info->v23.vram_module[0].channel_width;
 +                              return mem_channel_number * (1 << mem_channel_width);
 +                      case 4:
 +                              mem_channel_number = vram_info->v24.vram_module[0].channel_num;
 +                              mem_channel_width = vram_info->v24.vram_module[0].channel_width;
 +                              return mem_channel_number * (1 << mem_channel_width);
 +                      default:
 +                              return 0;
 +                      }
                }
        }
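
A worked example for the new dGPU path, with hypothetical table values; note that the code above treats channel_width as a shift count, so bits per channel = 1 << channel_width:

    /* hypothetical vram_info v2.3/v2.4 contents */
    u32 mem_channel_number = 8;    /* channels */
    u32 mem_channel_width = 4;     /* encoded as log2(bits per channel) */
    u32 bus_width = mem_channel_number * (1 << mem_channel_width); /* 128 bits */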
  
@@@ -208,9 -179,6 +208,9 @@@ static int convert_atom_mem_type_to_vra
                case ATOM_DGPU_VRAM_TYPE_HBM2:
                        vram_type = AMDGPU_VRAM_TYPE_HBM;
                        break;
 +              case ATOM_DGPU_VRAM_TYPE_GDDR6:
 +                      vram_type = AMDGPU_VRAM_TYPE_GDDR6;
 +                      break;
                default:
                        vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
                        break;
@@@ -258,9 -226,6 +258,9 @@@ int amdgpu_atomfirmware_get_vram_type(s
                        switch (crev) {
                        case 3:
                                mem_type = vram_info->v23.vram_module[0].memory_type;
 +                              return convert_atom_mem_type_to_vram_type(adev, mem_type);
 +                      case 4:
 +                              mem_type = vram_info->v24.vram_module[0].memory_type;
                                return convert_atom_mem_type_to_vram_type(adev, mem_type);
                        default:
                                return 0;
   * Authors:
   *    Jerome Glisse <glisse@freedesktop.org>
   */
+ #include <linux/file.h>
  #include <linux/pagemap.h>
  #include <linux/sync_file.h>
- #include <drm/drmP.h>
  #include <drm/amdgpu_drm.h>
  #include <drm/drm_syncobj.h>
  #include "amdgpu.h"
@@@ -648,7 -650,7 +650,7 @@@ static int amdgpu_cs_parser_bos(struct 
        }
  
        r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
 -                                 &duplicates, true);
 +                                 &duplicates, false);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
        }
  
        r = amdgpu_cs_list_validate(p, &duplicates);
 -      if (r) {
 -              DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
 +      if (r)
                goto error_validate;
 -      }
  
        r = amdgpu_cs_list_validate(p, &p->validated);
 -      if (r) {
 -              DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
 +      if (r)
                goto error_validate;
 -      }
  
        amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
                                     p->bytes_moved_vis);
@@@ -872,7 -878,7 +874,7 @@@ static int amdgpu_cs_vm_handling(struc
        if (r)
                return r;
  
 -      if (amdgpu_sriov_vf(adev)) {
 +      if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
                struct dma_fence *f;
  
                bo_va = fpriv->csa_va;
@@@ -961,8 -967,7 +963,8 @@@ static int amdgpu_cs_ib_fill(struct amd
                if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
                        continue;
  
 -              if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
 +              if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
 +                  (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
                        if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
                                if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
                                        ce_preempt++;
@@@ -1380,7 -1385,7 +1382,7 @@@ int amdgpu_cs_ioctl(struct drm_device *
        if (r) {
                if (r == -ENOMEM)
                        DRM_ERROR("Not enough memory for command submission!\n");
 -              else if (r != -ERESTARTSYS)
 +              else if (r != -ERESTARTSYS && r != -EAGAIN)
                        DRM_ERROR("Failed to process the buffer list %d!\n", r);
                goto out;
        }
   */
  
  #include <linux/kthread.h>
- #include <drm/drmP.h>
- #include <linux/debugfs.h>
+ #include <linux/pci.h>
+ #include <linux/uaccess.h>
+ #include <drm/drm_debugfs.h>
  #include "amdgpu.h"
  
  /**
@@@ -920,195 -923,17 +923,195 @@@ static const struct drm_info_list amdgp
        {"amdgpu_evict_gtt", &amdgpu_debugfs_evict_gtt},
  };
  
 +static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
 +                                        struct dma_fence **fences)
 +{
 +      struct amdgpu_fence_driver *drv = &ring->fence_drv;
 +      uint32_t sync_seq, last_seq;
 +
 +      last_seq = atomic_read(&ring->fence_drv.last_seq);
 +      sync_seq = ring->fence_drv.sync_seq;
 +
 +      last_seq &= drv->num_fences_mask;
 +      sync_seq &= drv->num_fences_mask;
 +
 +      do {
 +              struct dma_fence *fence, **ptr;
 +
 +              ++last_seq;
 +              last_seq &= drv->num_fences_mask;
 +              ptr = &drv->fences[last_seq];
 +
 +              fence = rcu_dereference_protected(*ptr, 1);
 +              RCU_INIT_POINTER(*ptr, NULL);
 +
 +              if (!fence)
 +                      continue;
 +
 +              fences[last_seq] = fence;
 +
 +      } while (last_seq != sync_seq);
 +}
 +
 +static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,
 +                                          int length)
 +{
 +      int i;
 +      struct dma_fence *fence;
 +
 +      for (i = 0; i < length; i++) {
 +              fence = fences[i];
 +              if (!fence)
 +                      continue;
 +              dma_fence_signal(fence);
 +              dma_fence_put(fence);
 +      }
 +}
 +
 +static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
 +{
 +      struct drm_sched_job *s_job;
 +      struct dma_fence *fence;
 +
 +      spin_lock(&sched->job_list_lock);
 +      list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
 +              fence = sched->ops->run_job(s_job);
 +              dma_fence_put(fence);
 +      }
 +      spin_unlock(&sched->job_list_lock);
 +}
 +
 +static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
 +{
 +      struct amdgpu_job *job;
 +      struct drm_sched_job *s_job;
 +      uint32_t preempt_seq;
 +      struct dma_fence *fence, **ptr;
 +      struct amdgpu_fence_driver *drv = &ring->fence_drv;
 +      struct drm_gpu_scheduler *sched = &ring->sched;
 +
 +      if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
 +              return;
 +
 +      preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
 +      if (preempt_seq <= atomic_read(&drv->last_seq))
 +              return;
 +
 +      preempt_seq &= drv->num_fences_mask;
 +      ptr = &drv->fences[preempt_seq];
 +      fence = rcu_dereference_protected(*ptr, 1);
 +
 +      spin_lock(&sched->job_list_lock);
 +      list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
 +              job = to_amdgpu_job(s_job);
 +              if (job->fence == fence)
 +                      /* mark the job as preempted */
 +                      job->preemption_status |= AMDGPU_IB_PREEMPTED;
 +      }
 +      spin_unlock(&sched->job_list_lock);
 +}
 +
 +static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
 +{
 +      int r, resched, length;
 +      struct amdgpu_ring *ring;
 +      struct dma_fence **fences = NULL;
 +      struct amdgpu_device *adev = (struct amdgpu_device *)data;
 +
 +      if (val >= AMDGPU_MAX_RINGS)
 +              return -EINVAL;
 +
 +      ring = adev->rings[val];
 +
 +      if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
 +              return -EINVAL;
 +
 +      /* the last preemption failed */
 +      if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr))
 +              return -EBUSY;
 +
 +      length = ring->fence_drv.num_fences_mask + 1;
 +      fences = kcalloc(length, sizeof(void *), GFP_KERNEL);
 +      if (!fences)
 +              return -ENOMEM;
 +
 +      /* stop the scheduler */
 +      kthread_park(ring->sched.thread);
 +
 +      resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
 +
 +      /* preempt the IB */
 +      r = amdgpu_ring_preempt_ib(ring);
 +      if (r) {
 +              DRM_WARN("failed to preempt ring %d\n", ring->idx);
 +              goto failure;
 +      }
 +
 +      amdgpu_fence_process(ring);
 +
 +      if (atomic_read(&ring->fence_drv.last_seq) !=
 +          ring->fence_drv.sync_seq) {
 +              DRM_INFO("ring %d was preempted\n", ring->idx);
 +
 +              amdgpu_ib_preempt_mark_partial_job(ring);
 +
 +              /* swap out the old fences */
 +              amdgpu_ib_preempt_fences_swap(ring, fences);
 +
 +              amdgpu_fence_driver_force_completion(ring);
 +
 +              /* resubmit unfinished jobs */
 +              amdgpu_ib_preempt_job_recovery(&ring->sched);
 +
 +              /* wait for the resubmitted jobs to finish */
 +              amdgpu_fence_wait_empty(ring);
 +
 +              /* signal the old fences */
 +              amdgpu_ib_preempt_signal_fences(fences, length);
 +      }
 +
 +failure:
 +      /* restart the scheduler */
 +      kthread_unpark(ring->sched.thread);
 +
 +      ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
 +
 +      if (fences)
 +              kfree(fences);
 +
 +      return 0;
 +}
 +
 +DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
 +                      amdgpu_debugfs_ib_preempt, "%llu\n");
 +
  int amdgpu_debugfs_init(struct amdgpu_device *adev)
  {
 +      adev->debugfs_preempt =
 +              debugfs_create_file("amdgpu_preempt_ib", 0600,
 +                                  adev->ddev->primary->debugfs_root,
 +                                  (void *)adev, &fops_ib_preempt);
 +      if (!(adev->debugfs_preempt)) {
 +              DRM_ERROR("unable to create amdgpu_preempt_ib debugfs file\n");
 +              return -EIO;
 +      }
 +
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
                                        ARRAY_SIZE(amdgpu_debugfs_list));
  }
  
 +void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev)
 +{
 +      if (adev->debugfs_preempt)
 +              debugfs_remove(adev->debugfs_preempt);
 +}
 +
  #else
  int amdgpu_debugfs_init(struct amdgpu_device *adev)
  {
        return 0;
  }
 +void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev) { }
  int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
  {
        return 0;
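
Once registered, the new amdgpu_preempt_ib file can be exercised from userspace by writing a ring index to it. A hypothetical test program; the debugfs path assumes a mounted debugfs and the first DRM minor:

    #include <stdio.h>

    int main(void)
    {
            /* ring index to preempt; values >= AMDGPU_MAX_RINGS return -EINVAL */
            FILE *f = fopen("/sys/kernel/debug/dri/0/amdgpu_preempt_ib", "w");

            if (!f)
                    return 1;
            fprintf(f, "0\n");
            return fclose(f);
    }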
   */
  #include <linux/power_supply.h>
  #include <linux/kthread.h>
+ #include <linux/module.h>
  #include <linux/console.h>
  #include <linux/slab.h>
- #include <drm/drmP.h>
  #include <drm/drm_atomic_helper.h>
  #include <drm/drm_probe_helper.h>
  #include <drm/amdgpu_drm.h>
@@@ -51,7 -52,6 +52,7 @@@
  #endif
  #include "vi.h"
  #include "soc15.h"
 +#include "nv.h"
  #include "bif/bif_4_1_d.h"
  #include <linux/pci.h>
  #include <linux/firmware.h>
  
  #include "amdgpu_xgmi.h"
  #include "amdgpu_ras.h"
 +#include "amdgpu_pmu.h"
  
  MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
  MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
  MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
  MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
  MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
 +MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
  
  #define AMDGPU_RESUME_MS              2000
  
@@@ -97,7 -95,6 +98,7 @@@ static const char *amdgpu_asic_name[] 
        "VEGA12",
        "VEGA20",
        "RAVEN",
 +      "NAVI10",
        "LAST",
  };
  
@@@ -510,10 -507,7 +511,10 @@@ void amdgpu_device_program_register_seq
                } else {
                        tmp = RREG32(reg);
                        tmp &= ~and_mask;
 -                      tmp |= or_mask;
 +                      if (adev->family >= AMDGPU_FAMILY_AI)
 +                              tmp |= (or_mask & and_mask);
 +                      else
 +                              tmp |= or_mask;
                }
                WREG32(reg, tmp);
        }
@@@ -980,6 -974,13 +981,6 @@@ static int amdgpu_device_check_argument
  
        amdgpu_device_check_block_size(adev);
  
 -      if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
 -          !is_power_of_2(amdgpu_vram_page_split))) {
 -              dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
 -                       amdgpu_vram_page_split);
 -              amdgpu_vram_page_split = 1024;
 -      }
 -
        ret = amdgpu_device_get_job_timeout_settings(adev);
        if (ret) {
                dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
@@@ -1383,9 -1384,6 +1384,9 @@@ static int amdgpu_device_parse_gpu_info
                else
                        chip_name = "raven";
                break;
 +      case CHIP_NAVI10:
 +              chip_name = "navi10";
 +              break;
        }
  
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
                adev->gfx.cu_info.max_scratch_slots_per_cu =
                        le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
                adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
 +              if (hdr->version_minor >= 1) {
 +                      const struct gpu_info_firmware_v1_1 *gpu_info_fw =
 +                              (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
 +                                                                      le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 +                      adev->gfx.config.num_sc_per_sh =
 +                              le32_to_cpu(gpu_info_fw->num_sc_per_sh);
 +                      adev->gfx.config.num_packer_per_sc =
 +                              le32_to_cpu(gpu_info_fw->num_packer_per_sc);
 +              }
 +#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 +              if (hdr->version_minor == 2) {
 +                      const struct gpu_info_firmware_v1_2 *gpu_info_fw =
 +                              (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
 +                                                                      le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 +                      adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
 +              }
 +#endif
                break;
        }
        default:
@@@ -1537,13 -1518,6 +1538,13 @@@ static int amdgpu_device_ip_early_init(
                if (r)
                        return r;
                break;
 +      case  CHIP_NAVI10:
 +              adev->family = AMDGPU_FAMILY_NV;
 +
 +              r = nv_set_ip_blocks(adev);
 +              if (r)
 +                      return r;
 +              break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        if (amdgpu_sriov_vf(adev))
                adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
  
 -      /* Read BIOS */
 -      if (!amdgpu_get_bios(adev))
 -              return -EINVAL;
 -
 -      r = amdgpu_atombios_init(adev);
 -      if (r) {
 -              dev_err(adev->dev, "amdgpu_atombios_init failed\n");
 -              amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
 -              return r;
 -      }
 -
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
                        DRM_ERROR("disabled ip block: %d <%s>\n",
                                adev->ip_blocks[i].status.valid = true;
                        }
                }
 +              /* get the vbios after the asic_funcs are set up */
 +              if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
 +                      /* Read BIOS */
 +                      if (!amdgpu_get_bios(adev))
 +                              return -EINVAL;
 +
 +                      r = amdgpu_atombios_init(adev);
 +                      if (r) {
 +                              dev_err(adev->dev, "amdgpu_atombios_init failed\n");
 +                              amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
 +                              return r;
 +                      }
 +              }
        }
  
        adev->cg_flags &= amdgpu_cg_mask;
@@@ -1741,7 -1713,7 +1742,7 @@@ static int amdgpu_device_ip_init(struc
                        adev->ip_blocks[i].status.hw = true;
  
                        /* right after GMC hw init, we create CSA */
 -                      if (amdgpu_sriov_vf(adev)) {
 +                      if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
                                r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
                                                                AMDGPU_GEM_DOMAIN_VRAM,
                                                                AMDGPU_CSA_SIZE);
@@@ -2423,9 -2395,6 +2424,9 @@@ bool amdgpu_device_asic_has_dc_support(
  #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
        case CHIP_RAVEN:
  #endif
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +      case CHIP_NAVI10:
 +#endif
                return amdgpu_dc != 0;
  #endif
        default:
@@@ -2597,20 -2566,6 +2598,20 @@@ int amdgpu_device_init(struct amdgpu_de
  
        amdgpu_device_get_pcie_info(adev);
  
 +      if (amdgpu_mcbp)
 +              DRM_INFO("MCBP is enabled\n");
 +
 +      if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
 +              adev->enable_mes = true;
 +
 +      if (amdgpu_discovery) {
 +              r = amdgpu_discovery_init(adev);
 +              if (r) {
 +                      dev_err(adev->dev, "amdgpu_discovery_init failed\n");
 +                      return r;
 +              }
 +      }
 +
        /* early init functions */
        r = amdgpu_device_ip_early_init(adev);
        if (r)
@@@ -2735,9 -2690,6 +2736,9 @@@ fence_driver_init
  
        amdgpu_fbdev_init(adev);
  
 +      if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
 +              amdgpu_pm_virt_sysfs_init(adev);
 +
        r = amdgpu_pm_sysfs_init(adev);
        if (r)
                DRM_ERROR("registering pm debugfs failed (%d).\n", r);
                return r;
        }
  
 +      r = amdgpu_pmu_init(adev);
 +      if (r)
 +              dev_err(adev->dev, "amdgpu_pmu_init failed\n");
 +
        return 0;
  
  failed:
@@@ -2863,16 -2811,9 +2864,16 @@@ void amdgpu_device_fini(struct amdgpu_d
        iounmap(adev->rmmio);
        adev->rmmio = NULL;
        amdgpu_device_doorbell_fini(adev);
 +      if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
 +              amdgpu_pm_virt_sysfs_fini(adev);
 +
        amdgpu_debugfs_regs_cleanup(adev);
        device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
        amdgpu_ucode_sysfs_fini(adev);
 +      amdgpu_pmu_fini(adev);
 +      amdgpu_debugfs_preempt_cleanup(adev);
 +      if (amdgpu_discovery)
 +              amdgpu_discovery_fini(adev);
  }
  
  
@@@ -22,7 -22,6 +22,6 @@@
   * Authors: Alex Deucher
   */
  
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_atombios.h"
  #include "amdgpu_i2c.h"
@@@ -907,63 -906,16 +906,63 @@@ amdgpu_get_vce_clock_state(void *handle
  
  int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
  {
 -      if (is_support_sw_smu(adev))
 -              return smu_get_sclk(&adev->smu, low);
 -      else
 +      uint32_t clk_freq;
 +      int ret = 0;
 +      if (is_support_sw_smu(adev)) {
 +              ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
 +                                           low ? &clk_freq : NULL,
 +                                           !low ? &clk_freq : NULL);
 +              if (ret)
 +                      return 0;
 +              return clk_freq * 100;
 +
 +      } else {
                return (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
 +      }
  }
  
  int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
  {
 -      if (is_support_sw_smu(adev))
 -              return smu_get_mclk(&adev->smu, low);
 -      else
 +      uint32_t clk_freq;
 +      int ret = 0;
 +      if (is_support_sw_smu(adev)) {
 +              ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
 +                                           low ? &clk_freq : NULL,
 +                                           !low ? &clk_freq : NULL);
 +              if (ret)
 +                      return 0;
 +              return clk_freq * 100;
 +
 +      } else {
                return (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
 +      }
 +}
 +
 +int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
 +{
 +      int ret = 0;
 +      bool swsmu = is_support_sw_smu(adev);
 +
 +      switch (block_type) {
 +      case AMD_IP_BLOCK_TYPE_GFX:
 +      case AMD_IP_BLOCK_TYPE_UVD:
 +      case AMD_IP_BLOCK_TYPE_VCN:
 +      case AMD_IP_BLOCK_TYPE_VCE:
 +              if (swsmu)
 +                      ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
 +              else
 +                      ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
 +                              (adev)->powerplay.pp_handle, block_type, gate));
 +              break;
 +      case AMD_IP_BLOCK_TYPE_GMC:
 +      case AMD_IP_BLOCK_TYPE_ACP:
 +      case AMD_IP_BLOCK_TYPE_SDMA:
 +              ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
 +                              (adev)->powerplay.pp_handle, block_type, gate));
 +              break;
 +      default:
 +              break;
 +      }
 +
 +      return ret;
  }
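
A hedged sketch of a call site for the new helper, with VCN chosen purely for illustration: GFX/UVD/VCN/VCE requests go through the sw SMU when it is supported, while GMC/ACP/SDMA always use the legacy powerplay callback:

    /* hypothetical caller: power-gate the VCN block */
    int r = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, true);

    if (r)
            dev_warn(adev->dev, "VCN powergating failed (%d)\n", r);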
   * OTHER DEALINGS IN THE SOFTWARE.
   */
  
- #include <drm/drmP.h>
  #include <drm/amdgpu_drm.h>
+ #include <drm/drm_drv.h>
  #include <drm/drm_gem.h>
+ #include <drm/drm_vblank.h>
  #include "amdgpu_drv.h"
  
  #include <drm/drm_pciids.h>
  #include <linux/console.h>
  #include <linux/module.h>
+ #include <linux/pci.h>
  #include <linux/pm_runtime.h>
  #include <linux/vga_switcheroo.h>
  #include <drm/drm_probe_helper.h>
   * - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
   * - 3.31.0 - Add support for per-flip tiling attribute changes with DC
   * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
 + * - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
   */
  #define KMS_DRIVER_MAJOR      3
 -#define KMS_DRIVER_MINOR      32
 +#define KMS_DRIVER_MINOR      33
  #define KMS_DRIVER_PATCHLEVEL 0
  
  #define AMDGPU_MAX_TIMEOUT_PARAM_LENTH        256
@@@ -109,6 -110,7 +111,6 @@@ int amdgpu_vm_fragment_size = -1
  int amdgpu_vm_block_size = -1;
  int amdgpu_vm_fault_stop = 0;
  int amdgpu_vm_debug = 0;
 -int amdgpu_vram_page_split = 512;
  int amdgpu_vm_update_mode = -1;
  int amdgpu_exp_hw_support = 0;
  int amdgpu_dc = -1;
@@@ -136,10 -138,6 +138,10 @@@ int amdgpu_emu_mode = 0
  uint amdgpu_smu_memory_pool_size = 0;
  /* FBC (bit 0) disabled by default*/
  uint amdgpu_dc_feature_mask = 0;
 +int amdgpu_async_gfx_ring = 1;
 +int amdgpu_mcbp = 0;
 +int amdgpu_discovery = 0;
 +int amdgpu_mes = 0;
  
  struct amdgpu_mgpu_info mgpu_info = {
        .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
@@@ -251,9 -249,7 +253,9 @@@ module_param_string(lockup_timeout, amd
  
  /**
   * DOC: dpm (int)
 - * Override for dynamic power management setting (1 = enable, 0 = disable). The default is -1 (auto).
 + * Override for dynamic power management setting
 + * (0 = disable, 1 = enable, 2 = enable sw smu driver for vega20)
 + * The default is -1 (auto).
   */
  MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
  module_param_named(dpm, amdgpu_dpm, int, 0444);
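
For example, booting a Vega20 board with amdgpu.dpm=2 on the kernel command line selects the new sw SMU driver described above.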
@@@ -349,6 -345,13 +351,6 @@@ MODULE_PARM_DESC(vm_update_mode, "VM up
  module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
  
  /**
 - * DOC: vram_page_split (int)
 - * Override the number of pages after we split VRAM allocations (default 512, -1 = disable). The default is 512.
 - */
 -MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 512, -1 = disable)");
 -module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
 -
 -/**
   * DOC: exp_hw_support (int)
   * Enable experimental hw support (1 = enable). The default is 0 (disabled).
   */
@@@ -571,26 -574,6 +573,26 @@@ MODULE_PARM_DESC(smu_memory_pool_size
                "0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
  module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444);
  
 +/**
 + * DOC: async_gfx_ring (int)
 + * It is used to enable gfx rings that can be configured with different or equal priorities
 + */
 +MODULE_PARM_DESC(async_gfx_ring,
 +      "Asynchronous GFX rings that could be configured with either different priorities (HP3D ring and LP3D ring), or equal priorities (0 = disabled, 1 = enabled (default))");
 +module_param_named(async_gfx_ring, amdgpu_async_gfx_ring, int, 0444);
 +
 +MODULE_PARM_DESC(mcbp,
 +      "Enable Mid-command buffer preemption (0 = disabled (default), 1 = enabled)");
 +module_param_named(mcbp, amdgpu_mcbp, int, 0444);
 +
 +MODULE_PARM_DESC(discovery,
 +      "Allow driver to discover hardware IPs from IP Discovery table at the top of VRAM");
 +module_param_named(discovery, amdgpu_discovery, int, 0444);
 +
 +MODULE_PARM_DESC(mes,
 +      "Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)");
 +module_param_named(mes, amdgpu_mes, int, 0444);
 +
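
All three new options default to disabled; for bring-up they can be enabled together, e.g. amdgpu.mcbp=1 amdgpu.discovery=1 amdgpu.mes=1 on the kernel command line.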
  #ifdef CONFIG_HSA_AMD
  /**
   * DOC: sched_policy (int)
@@@ -695,14 -678,6 +697,14 @@@ MODULE_PARM_DESC(halt_if_hws_hang, "Hal
  bool hws_gws_support;
  module_param(hws_gws_support, bool, 0444);
  MODULE_PARM_DESC(hws_gws_support, "MEC FW support gws barriers (false = not supported (Default), true = supported)");
 +
 +/**
 +  * DOC: queue_preemption_timeout_ms (int)
 +  * queue preemption timeout in ms (1 = Minimum, 9000 = default)
 +  */
 +int queue_preemption_timeout_ms;
 +module_param(queue_preemption_timeout_ms, int, 0644);
 +MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1 = Minimum, 9000 = default)");
  #endif
  
  /**
  MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default))");
  module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
  
 +/**
 + * DOC: abmlevel (uint)
 + * Override the default ABM (Adaptive Backlight Management) level used for DC
 + * enabled hardware. Requires DMCU to be supported and loaded.
 + * Valid levels are 0-4. A value of 0 indicates that ABM should be disabled by
 + * default. Values 1-4 control the maximum allowable brightness reduction via
 + * the ABM algorithm, with 1 being the least reduction and 4 being the most
 + * reduction.
 + *
 + * Defaults to 0, or disabled. Userspace can still override this level later
 + * after boot.
 + */
 +uint amdgpu_dm_abm_level = 0;
 +MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) ");
 +module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);
 +
  static const struct pci_device_id pciidlist[] = {
  #ifdef  CONFIG_DRM_AMDGPU_SI
        {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
        /* Raven */
        {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
        {0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
 +      /* Navi10 */
 +      {0x1002, 0x7310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
 +      {0x1002, 0x7312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
 +      {0x1002, 0x7318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
 +      {0x1002, 0x7319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
 +      {0x1002, 0x731A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
 +      {0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
 +      {0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
  
        {0, 0, 0}
  };
@@@ -34,7 -34,9 +34,9 @@@
  #include <linux/kref.h>
  #include <linux/slab.h>
  #include <linux/firmware.h>
- #include <drm/drmP.h>
+ #include <drm/drm_debugfs.h>
  #include "amdgpu.h"
  #include "amdgpu_trace.h"
  
@@@ -707,30 -709,22 +709,30 @@@ static int amdgpu_debugfs_fence_info(st
                amdgpu_fence_process(ring);
  
                seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
 -              seq_printf(m, "Last signaled fence 0x%08x\n",
 +              seq_printf(m, "Last signaled fence          0x%08x\n",
                           atomic_read(&ring->fence_drv.last_seq));
 -              seq_printf(m, "Last emitted        0x%08x\n",
 +              seq_printf(m, "Last emitted                 0x%08x\n",
                           ring->fence_drv.sync_seq);
  
 +              if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
 +                  ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
 +                      seq_printf(m, "Last signaled trailing fence 0x%08x\n",
 +                                 le32_to_cpu(*ring->trail_fence_cpu_addr));
 +                      seq_printf(m, "Last emitted                 0x%08x\n",
 +                                 ring->trail_seq);
 +              }
 +
                if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
                        continue;
  
                /* set in CP_VMID_PREEMPT and preemption occurred */
 -              seq_printf(m, "Last preempted      0x%08x\n",
 +              seq_printf(m, "Last preempted               0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
                /* set in CP_VMID_RESET and reset occurred */
 -              seq_printf(m, "Last reset          0x%08x\n",
 +              seq_printf(m, "Last reset                   0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
                /* Both preemption and reset occurred */
 -              seq_printf(m, "Last both           0x%08x\n",
 +              seq_printf(m, "Last both                    0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
        }
        return 0;
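
With the widened columns, the amdgpu_fence_info output for a GFX or SDMA ring now lines up; a hypothetical sample for a gfx ring:

    --- ring 0 (gfx) ---
    Last signaled fence          0x00000042
    Last emitted                 0x00000043
    Last signaled trailing fence 0x00000010
    Last emitted                 0x00000010
    Last preempted               0x00000000
    Last reset                   0x00000000
    Last both                    0x00000000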
   *          Jerome Glisse
   */
  #include <linux/ktime.h>
+ #include <linux/module.h>
  #include <linux/pagemap.h>
- #include <drm/drmP.h>
+ #include <linux/pci.h>
  #include <drm/amdgpu_drm.h>
+ #include <drm/drm_debugfs.h>
  #include "amdgpu.h"
  #include "amdgpu_display.h"
  #include "amdgpu_xgmi.h"
@@@ -171,7 -175,7 +175,7 @@@ void amdgpu_gem_object_close(struct drm
  
        amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
  
 -      r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, true);
 +      r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
                        "we fail to reserve bo (%d)\n", r);
@@@ -608,7 -612,7 +612,7 @@@ int amdgpu_gem_va_ioctl(struct drm_devi
  
        amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
  
 -      r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, true);
 +      r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, false);
        if (r)
                goto error_unref;
  
@@@ -22,7 -22,7 +22,7 @@@
   * OTHER DEALINGS IN THE SOFTWARE.
   *
   */
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_gfx.h"
  #include "amdgpu_rlc.h"
@@@ -34,8 -34,8 +34,8 @@@
   * GPU GFX IP block helpers function.
   */
  
 -int amdgpu_gfx_queue_to_bit(struct amdgpu_device *adev, int mec,
 -                          int pipe, int queue)
 +int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
 +                              int pipe, int queue)
  {
        int bit = 0;
  
@@@ -47,8 -47,8 +47,8 @@@
        return bit;
  }
  
 -void amdgpu_gfx_bit_to_queue(struct amdgpu_device *adev, int bit,
 -                           int *mec, int *pipe, int *queue)
 +void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
 +                               int *mec, int *pipe, int *queue)
  {
        *queue = bit % adev->gfx.mec.num_queue_per_pipe;
        *pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
  bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
                                     int mec, int pipe, int queue)
  {
 -      return test_bit(amdgpu_gfx_queue_to_bit(adev, mec, pipe, queue),
 +      return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
                        adev->gfx.mec.queue_bitmap);
  }
  
 +int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
 +                             int me, int pipe, int queue)
 +{
 +      int bit = 0;
 +
 +      bit += me * adev->gfx.me.num_pipe_per_me
 +              * adev->gfx.me.num_queue_per_pipe;
 +      bit += pipe * adev->gfx.me.num_queue_per_pipe;
 +      bit += queue;
 +
 +      return bit;
 +}
 +
 +void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
 +                              int *me, int *pipe, int *queue)
 +{
 +      *queue = bit % adev->gfx.me.num_queue_per_pipe;
 +      *pipe = (bit / adev->gfx.me.num_queue_per_pipe)
 +              % adev->gfx.me.num_pipe_per_me;
 +      *me = (bit / adev->gfx.me.num_queue_per_pipe)
 +              / adev->gfx.me.num_pipe_per_me;
 +}
 +
 +bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
 +                                  int me, int pipe, int queue)
 +{
 +      return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
 +                      adev->gfx.me.queue_bitmap);
 +}
 +
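
A quick sanity check of the new ME-queue bit mapping, using hypothetical topology numbers:

    /* Hypothetical: 1 ME, 2 pipes per ME, 2 queues per pipe.
     * amdgpu_gfx_me_queue_to_bit(adev, 0, 1, 1) = 0*2*2 + 1*2 + 1 = 3,
     * and amdgpu_gfx_bit_to_me_queue(adev, 3, ...) recovers
     * me = 0, pipe = 1, queue = 1. */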
  /**
   * amdgpu_gfx_scratch_get - Allocate a scratch register
   *
@@@ -229,30 -199,6 +229,30 @@@ void amdgpu_gfx_compute_queue_acquire(s
                adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
  }
  
 +void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
 +{
 +      int i, queue, pipe, me;
 +
 +      for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) {
 +              queue = i % adev->gfx.me.num_queue_per_pipe;
 +              pipe = (i / adev->gfx.me.num_queue_per_pipe)
 +                      % adev->gfx.me.num_pipe_per_me;
 +              me = (i / adev->gfx.me.num_queue_per_pipe)
 +                    / adev->gfx.me.num_pipe_per_me;
 +
 +              if (me >= adev->gfx.me.num_me)
 +                      break;
 +              /* policy: amdgpu owns the first queue per pipe at this stage
 +               * will extend to multiple queues per pipe later */
 +              if (me == 0 && queue < 1)
 +                      set_bit(i, adev->gfx.me.queue_bitmap);
 +      }
 +
 +      /* update the number of active graphics rings */
 +      adev->gfx.num_gfx_rings =
 +              bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
 +}
 +
  static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
                                  struct amdgpu_ring *ring)
  {
                if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
                        continue;
  
 -              amdgpu_gfx_bit_to_queue(adev, queue_bit, &mec, &pipe, &queue);
 +              amdgpu_gfx_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
  
                /*
                 * 1. Using pipes 2/3 from MEC 2 seems cause problems.
@@@ -360,9 -306,9 +360,9 @@@ int amdgpu_gfx_kiq_init(struct amdgpu_d
        return 0;
  }
  
 -/* create MQD for each compute queue */
 -int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
 -                                 unsigned mqd_size)
 +/* create MQD for each compute/gfx queue */
 +int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
 +                         unsigned mqd_size)
  {
        struct amdgpu_ring *ring = NULL;
        int r, i;
                                dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
        }
  
 +      if (adev->asic_type == CHIP_NAVI10 && amdgpu_async_gfx_ring) {
 +              /* create MQD for each KGQ */
 +              for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
 +                      ring = &adev->gfx.gfx_ring[i];
 +                      if (!ring->mqd_obj) {
 +                              r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
 +                                                          AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
 +                                                          &ring->mqd_gpu_addr, &ring->mqd_ptr);
 +                              if (r) {
 +                                      dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
 +                                      return r;
 +                              }
 +
 +                              /* prepare MQD backup */
 +                              adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
 +                              if (!adev->gfx.me.mqd_backup[i])
 +                                      dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
 +                      }
 +              }
 +      }
 +
        /* create MQD for each KCQ */
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                ring = &adev->gfx.compute_ring[i];
                                                    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
                                                    &ring->mqd_gpu_addr, &ring->mqd_ptr);
                        if (r) {
 -                              dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
 +                              dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
                                return r;
                        }
  
        return 0;
  }
  
 -void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev)
 +void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
  {
        struct amdgpu_ring *ring = NULL;
        int i;
  
 +      if (adev->asic_type == CHIP_NAVI10 && amdgpu_async_gfx_ring) {
 +              for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
 +                      ring = &adev->gfx.gfx_ring[i];
 +                      kfree(adev->gfx.me.mqd_backup[i]);
 +                      amdgpu_bo_free_kernel(&ring->mqd_obj,
 +                                            &ring->mqd_gpu_addr,
 +                                            &ring->mqd_ptr);
 +              }
 +      }
 +
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                ring = &adev->gfx.compute_ring[i];
                kfree(adev->gfx.mec.mqd_backup[i]);
        }
  
        ring = &adev->gfx.kiq.ring;
 +      if (adev->asic_type == CHIP_NAVI10 && amdgpu_async_gfx_ring)
 +              kfree(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]);
        kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
        amdgpu_bo_free_kernel(&ring->mqd_obj,
                              &ring->mqd_gpu_addr,
                              &ring->mqd_ptr);
  }
  
 +int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
 +{
 +      struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 +      struct amdgpu_ring *kiq_ring = &kiq->ring;
 +      int i;
 +
 +      if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
 +              return -EINVAL;
 +
 +      if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
 +                                      adev->gfx.num_compute_rings))
 +              return -ENOMEM;
 +
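 +      /* RESET_QUEUES tears each compute queue down immediately instead
 +       * of saving its state for a later restore
 +       */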
 +      for (i = 0; i < adev->gfx.num_compute_rings; i++)
 +              kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
 +                                         RESET_QUEUES, 0, 0);
 +
 +      return amdgpu_ring_test_ring(kiq_ring);
 +}
 +
 +int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
 +{
 +      struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 +      struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
 +      uint64_t queue_mask = 0;
 +      int r, i;
 +
 +      if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
 +              return -EINVAL;
 +
 +      for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
 +              if (!test_bit(i, adev->gfx.mec.queue_bitmap))
 +                      continue;
 +
 +              /* This situation may be hit in the future if a new HW
 +               * generation exposes more than 64 queues. If so, the
 +               * definition of queue_mask needs updating */
 +              if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
 +                      DRM_ERROR("Invalid KCQ enabled: %d\n", i);
 +                      break;
 +              }
 +
 +              queue_mask |= (1ull << i);
 +      }
 +
 +      DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
 +                                                      kiq_ring->queue);
 +
 +      r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
 +                                      adev->gfx.num_compute_rings +
 +                                      kiq->pmf->set_resources_size);
 +      if (r) {
 +              DRM_ERROR("Failed to lock KIQ (%d).\n", r);
 +              return r;
 +      }
 +
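 +      /* hand the eligibility mask built above to the KIQ firmware, then
 +       * ask it to map every compute ring
 +       */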
 +      kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
 +      for (i = 0; i < adev->gfx.num_compute_rings; i++)
 +              kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);
 +
 +      r = amdgpu_ring_test_helper(kiq_ring);
 +      if (r)
 +              DRM_ERROR("KCQ enable failed\n");
 +
 +      return r;
 +}
 +
  /* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
   *
   * @adev: amdgpu_device pointer
@@@ -547,9 -393,7 +547,9 @@@ void amdgpu_gfx_off_ctrl(struct amdgpu_
        if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
                return;
  
 -      if (!adev->powerplay.pp_funcs || !adev->powerplay.pp_funcs->set_powergating_by_smu)
 +      if (!is_support_sw_smu(adev) &&
 +          (!adev->powerplay.pp_funcs ||
 +           !adev->powerplay.pp_funcs->set_powergating_by_smu))
                return;
  
  
   */
  #include <linux/seq_file.h>
  #include <linux/slab.h>
- #include <drm/drmP.h>
  #include <drm/amdgpu_drm.h>
+ #include <drm/drm_debugfs.h>
  #include "amdgpu.h"
  #include "atom.h"
  #include "amdgpu_trace.h"
@@@ -209,7 -211,6 +211,7 @@@ int amdgpu_ib_schedule(struct amdgpu_ri
        skip_preamble = ring->current_ctx == fence_ctx;
        if (job && ring->funcs->emit_cntxcntl) {
                status |= job->preamble_status;
 +              status |= job->preemption_status;
                amdgpu_ring_emit_cntxcntl(ring, status);
        }
  
  
                /* drop preamble IBs if we don't have a context switch */
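 +              /* with MCBP enabled the preamble carries the state needed to
 +               * resume after a preemption, so it must always be emitted
 +               */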
                if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
 -                      skip_preamble &&
 -                      !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
 -                      !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
 +                  skip_preamble &&
 +                  !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
 +                  !amdgpu_mcbp &&
 +                  !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
                        continue;
  
                amdgpu_ring_emit_ib(ring, job, ib, status);
@@@ -24,7 -24,7 +24,7 @@@
  
  #include <linux/idr.h>
  #include <linux/dma-fence-array.h>
- #include <drm/drmP.h>
  
  #include "amdgpu.h"
  #include "amdgpu_trace.h"
@@@ -364,11 -364,8 +364,11 @@@ static int amdgpu_vmid_grab_used(struc
                if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
                        needs_flush = true;
  
 -              /* Concurrent flushes are only possible starting with Vega10 */
 -              if (adev->asic_type < CHIP_VEGA10 && needs_flush)
 +              /* Concurrent flushes are only possible starting with Vega10 and
 +               * are broken on Navi10 and Navi14.
 +               */
 +              if (needs_flush && (adev->asic_type < CHIP_VEGA10 ||
 +                                  adev->asic_type == CHIP_NAVI10))
                        continue;
  
                /* Good, we can use this VMID. Remember this submission as
@@@ -25,8 -25,9 +25,9 @@@
   *          Alex Deucher
   *          Jerome Glisse
   */
- #include <drm/drmP.h>
  #include "amdgpu.h"
+ #include <drm/drm_debugfs.h>
  #include <drm/amdgpu_drm.h>
  #include "amdgpu_sched.h"
  #include "amdgpu_uvd.h"
@@@ -35,6 -36,8 +36,8 @@@
  
  #include <linux/vga_switcheroo.h>
  #include <linux/slab.h>
+ #include <linux/uaccess.h>
+ #include <linux/pci.h>
  #include <linux/pm_runtime.h>
  #include "amdgpu_amdkfd.h"
  #include "amdgpu_gem.h"
@@@ -709,7 -712,7 +712,7 @@@ static int amdgpu_info_ioctl(struct drm
                dev_info.ids_flags = 0;
                if (adev->flags & AMD_IS_APU)
                        dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
 -              if (amdgpu_sriov_vf(adev))
 +              if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
                        dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
  
                vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
                dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
                dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;
  
 +              if (adev->family >= AMDGPU_FAMILY_NV)
 +                      dev_info.pa_sc_tile_steering_override =
 +                              adev->gfx.config.pa_sc_tile_steering_override;
 +
                return copy_to_user(out, &dev_info,
                                    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
        }
@@@ -1007,7 -1006,7 +1010,7 @@@ int amdgpu_driver_open_kms(struct drm_d
                goto error_vm;
        }
  
 -      if (amdgpu_sriov_vf(adev)) {
 +      if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
                uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
  
                r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
@@@ -1070,7 -1069,7 +1073,7 @@@ void amdgpu_driver_postclose_kms(struc
  
        amdgpu_vm_bo_rmv(adev, fpriv->prt_va);
  
 -      if (amdgpu_sriov_vf(adev)) {
 +      if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
                /* TODO: how to handle reserve failure */
                BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
                amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
@@@ -47,7 -47,7 +47,7 @@@
  #include <linux/module.h>
  #include <linux/hmm.h>
  #include <linux/interval_tree.h>
- #include <drm/drmP.h>
  #include <drm/drm.h>
  
  #include "amdgpu.h"
@@@ -519,6 -519,7 +519,6 @@@ void amdgpu_hmm_init_range(struct hmm_r
                range->flags = hmm_range_flags;
                range->values = hmm_range_values;
                range->pfn_shift = PAGE_SHIFT;
 -              range->pfns = NULL;
                INIT_LIST_HEAD(&range->list);
        }
  }
@@@ -31,7 -31,7 +31,7 @@@
   */
  #include <linux/list.h>
  #include <linux/slab.h>
- #include <drm/drmP.h>
  #include <drm/amdgpu_drm.h>
  #include <drm/drm_cache.h>
  #include "amdgpu.h"
@@@ -495,11 -495,7 +495,11 @@@ static int amdgpu_bo_do_create(struct a
  #endif
  
        bo->tbo.bdev = &adev->mman.bdev;
 -      amdgpu_bo_placement_from_domain(bo, bp->domain);
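 +      /* GWS, OA and GDS BOs have no real backing store, so start them
 +       * out with a dummy CPU placement until they are validated
 +       */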
 +      if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
 +                        AMDGPU_GEM_DOMAIN_GDS))
 +              amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
 +      else
 +              amdgpu_bo_placement_from_domain(bo, bp->domain);
        if (bp->type == ttm_bo_type_kernel)
                bo->tbo.priority = 1;
  
@@@ -979,7 -975,6 +979,7 @@@ static const char *amdgpu_vram_names[] 
        "HBM",
        "DDR3",
        "DDR4",
 +      "GDDR6",
  };
  
  /**
@@@ -22,7 -22,9 +22,9 @@@
   * Authors: Rafał Miłecki <zajec5@gmail.com>
   *          Alex Deucher <alexdeucher@gmail.com>
   */
- #include <drm/drmP.h>
+ #include <drm/drm_debugfs.h>
  #include "amdgpu.h"
  #include "amdgpu_drv.h"
  #include "amdgpu_pm.h"
@@@ -31,6 -33,7 +33,7 @@@
  #include "amdgpu_smu.h"
  #include "atom.h"
  #include <linux/power_supply.h>
+ #include <linux/pci.h>
  #include <linux/hwmon.h>
  #include <linux/hwmon-sysfs.h>
  #include <linux/nospec.h>
@@@ -64,9 -67,6 +67,9 @@@ static const struct cg_flag_name clocks
        {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
        {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
 +
 +      {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
 +      {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
        {0, NULL},
  };
  
@@@ -272,11 -272,8 +275,11 @@@ static ssize_t amdgpu_get_dpm_forced_pe
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_dpm_forced_level level = 0xff;
  
 -      if  ((adev->flags & AMD_IS_PX) &&
 -           (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 +      if (amdgpu_sriov_vf(adev))
 +              return 0;
 +
 +      if ((adev->flags & AMD_IS_PX) &&
 +          (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return snprintf(buf, PAGE_SIZE, "off\n");
  
        if (is_support_sw_smu(adev))
@@@ -314,12 -311,10 +317,12 @@@ static ssize_t amdgpu_set_dpm_forced_pe
             (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;
  
 -      if (is_support_sw_smu(adev))
 -              current_level = smu_get_performance_level(&adev->smu);
 -      else if (adev->powerplay.pp_funcs->get_performance_level)
 -              current_level = amdgpu_dpm_get_performance_level(adev);
 +      if (!amdgpu_sriov_vf(adev)) {
 +              if (is_support_sw_smu(adev))
 +                      current_level = smu_get_performance_level(&adev->smu);
 +              else if (adev->powerplay.pp_funcs->get_performance_level)
 +                      current_level = amdgpu_dpm_get_performance_level(adev);
 +      }
  
        if (strncmp("low", buf, strlen("low")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_LOW;
        }
  
        if (is_support_sw_smu(adev)) {
 -              mutex_lock(&adev->pm.mutex);
 -              if (adev->pm.dpm.thermal_active) {
 -                      count = -EINVAL;
 -                      mutex_unlock(&adev->pm.mutex);
 -                      goto fail;
 -              }
                ret = smu_force_performance_level(&adev->smu, level);
                if (ret)
                        count = -EINVAL;
 -              else
 -                      adev->pm.dpm.forced_level = level;
 -              mutex_unlock(&adev->pm.mutex);
        } else if (adev->powerplay.pp_funcs->force_performance_level) {
                mutex_lock(&adev->pm.mutex);
                if (adev->pm.dpm.thermal_active) {
@@@ -717,10 -721,10 +720,10 @@@ static ssize_t amdgpu_get_pp_od_clk_vol
        uint32_t size = 0;
  
        if (is_support_sw_smu(adev)) {
 -              size = smu_print_clk_levels(&adev->smu, OD_SCLK, buf);
 -              size += smu_print_clk_levels(&adev->smu, OD_MCLK, buf+size);
 -              size += smu_print_clk_levels(&adev->smu, OD_VDDC_CURVE, buf+size);
 -              size += smu_print_clk_levels(&adev->smu, OD_RANGE, buf+size);
 +              size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
 +              size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
 +              size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
 +              size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
                return size;
        } else if (adev->powerplay.pp_funcs->print_clock_levels) {
                size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
@@@ -831,7 -835,7 +834,7 @@@ static ssize_t amdgpu_get_pp_dpm_sclk(s
                return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
  
        if (is_support_sw_smu(adev))
 -              return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
 +              return smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
        else if (adev->powerplay.pp_funcs->print_clock_levels)
                return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
        else
@@@ -884,15 -888,12 +887,15 @@@ static ssize_t amdgpu_set_pp_dpm_sclk(s
        int ret;
        uint32_t mask = 0;
  
 +      if (amdgpu_sriov_vf(adev))
 +              return 0;
 +
        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;
  
        if (is_support_sw_smu(adev))
 -              ret = smu_force_clk_levels(&adev->smu, PP_SCLK, mask);
 +              ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
  
@@@ -909,12 -910,8 +912,12 @@@ static ssize_t amdgpu_get_pp_dpm_mclk(s
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
  
 +      if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
 +          adev->virt.ops->get_pp_clk)
 +              return adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
 +
        if (is_support_sw_smu(adev))
 -              return smu_print_clk_levels(&adev->smu, PP_MCLK, buf);
 +              return smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
        else if (adev->powerplay.pp_funcs->print_clock_levels)
                return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
        else
@@@ -931,15 -928,12 +934,15 @@@ static ssize_t amdgpu_set_pp_dpm_mclk(s
        int ret;
        uint32_t mask = 0;
  
 +      if (amdgpu_sriov_vf(adev))
 +              return 0;
 +
        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;
  
        if (is_support_sw_smu(adev))
 -              ret = smu_force_clk_levels(&adev->smu, PP_MCLK, mask);
 +              ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
  
@@@ -957,7 -951,7 +960,7 @@@ static ssize_t amdgpu_get_pp_dpm_socclk
        struct amdgpu_device *adev = ddev->dev_private;
  
        if (is_support_sw_smu(adev))
 -              return smu_print_clk_levels(&adev->smu, PP_SOCCLK, buf);
 +              return smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
        else if (adev->powerplay.pp_funcs->print_clock_levels)
                return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
        else
@@@ -979,7 -973,7 +982,7 @@@ static ssize_t amdgpu_set_pp_dpm_socclk
                return ret;
  
        if (is_support_sw_smu(adev))
 -              ret = smu_force_clk_levels(&adev->smu, PP_SOCCLK, mask);
 +              ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
  
@@@ -997,7 -991,7 +1000,7 @@@ static ssize_t amdgpu_get_pp_dpm_fclk(s
        struct amdgpu_device *adev = ddev->dev_private;
  
        if (is_support_sw_smu(adev))
 -              return smu_print_clk_levels(&adev->smu, PP_FCLK, buf);
 +              return smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
        else if (adev->powerplay.pp_funcs->print_clock_levels)
                return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
        else
@@@ -1019,7 -1013,7 +1022,7 @@@ static ssize_t amdgpu_set_pp_dpm_fclk(s
                return ret;
  
        if (is_support_sw_smu(adev))
 -              ret = smu_force_clk_levels(&adev->smu, PP_FCLK, mask);
 +              ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
  
@@@ -1037,7 -1031,7 +1040,7 @@@ static ssize_t amdgpu_get_pp_dpm_dcefcl
        struct amdgpu_device *adev = ddev->dev_private;
  
        if (is_support_sw_smu(adev))
 -              return smu_print_clk_levels(&adev->smu, PP_DCEFCLK, buf);
 +              return smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
        else if (adev->powerplay.pp_funcs->print_clock_levels)
                return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
        else
@@@ -1059,7 -1053,7 +1062,7 @@@ static ssize_t amdgpu_set_pp_dpm_dcefcl
                return ret;
  
        if (is_support_sw_smu(adev))
 -              ret = smu_force_clk_levels(&adev->smu, PP_DCEFCLK, mask);
 +              ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
  
@@@ -1077,7 -1071,7 +1080,7 @@@ static ssize_t amdgpu_get_pp_dpm_pcie(s
        struct amdgpu_device *adev = ddev->dev_private;
  
        if (is_support_sw_smu(adev))
 -              return smu_print_clk_levels(&adev->smu, PP_PCIE, buf);
 +              return smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
        else if (adev->powerplay.pp_funcs->print_clock_levels)
                return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
        else
@@@ -1099,7 -1093,7 +1102,7 @@@ static ssize_t amdgpu_set_pp_dpm_pcie(s
                return ret;
  
        if (is_support_sw_smu(adev))
 -              ret = smu_force_clk_levels(&adev->smu, PP_PCIE, mask);
 +              ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
        else if (adev->powerplay.pp_funcs->force_clock_level)
                ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
  
@@@ -1118,7 -1112,7 +1121,7 @@@ static ssize_t amdgpu_get_pp_sclk_od(st
        uint32_t value = 0;
  
        if (is_support_sw_smu(adev))
 -              value = smu_get_od_percentage(&(adev->smu), OD_SCLK);
 +              value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
        else if (adev->powerplay.pp_funcs->get_sclk_od)
                value = amdgpu_dpm_get_sclk_od(adev);
  
@@@ -1143,7 -1137,7 +1146,7 @@@ static ssize_t amdgpu_set_pp_sclk_od(st
        }
  
        if (is_support_sw_smu(adev)) {
 -              value = smu_set_od_percentage(&(adev->smu), OD_SCLK, (uint32_t)value);
 +              value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
        } else {
                if (adev->powerplay.pp_funcs->set_sclk_od)
                        amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
@@@ -1169,7 -1163,7 +1172,7 @@@ static ssize_t amdgpu_get_pp_mclk_od(st
        uint32_t value = 0;
  
        if (is_support_sw_smu(adev))
 -              value = smu_get_od_percentage(&(adev->smu), OD_MCLK);
 +              value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
        else if (adev->powerplay.pp_funcs->get_mclk_od)
                value = amdgpu_dpm_get_mclk_od(adev);
  
@@@ -1194,7 -1188,7 +1197,7 @@@ static ssize_t amdgpu_set_pp_mclk_od(st
        }
  
        if (is_support_sw_smu(adev)) {
 -              value = smu_set_od_percentage(&(adev->smu), OD_MCLK, (uint32_t)value);
 +              value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
        } else {
                if (adev->powerplay.pp_funcs->set_mclk_od)
                        amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
@@@ -2707,49 -2701,10 +2710,48 @@@ void amdgpu_pm_print_power_states(struc
  
  }
  
 +int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev)
 +{
 +      int ret = 0;
 +
 +      if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)))
 +              return ret;
 +
 +      ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
 +      if (ret) {
 +              DRM_ERROR("failed to create device file pp_dpm_sclk\n");
 +              return ret;
 +      }
 +
 +      ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
 +      if (ret) {
 +              DRM_ERROR("failed to create device file pp_dpm_mclk\n");
 +              return ret;
 +      }
 +
 +      ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
 +      if (ret) {
 +              DRM_ERROR("failed to create device file for dpm state\n");
 +              return ret;
 +      }
 +
 +      return ret;
 +}
 +
 +void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev)
 +{
 +      if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)))
 +              return;
 +
 +      device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
 +      device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
 +      device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
 +}
 +
  int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
  {
        int r;
  
        if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
                r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
                if (r) {
@@@ -24,7 -24,7 +24,7 @@@
   */
  
  #include <linux/firmware.h>
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_psp.h"
  #include "amdgpu_ucode.h"
@@@ -46,19 -46,12 +46,19 @@@ static int psp_early_init(void *handle
        case CHIP_VEGA10:
        case CHIP_VEGA12:
                psp_v3_1_set_psp_funcs(psp);
 +              psp->autoload_supported = false;
                break;
        case CHIP_RAVEN:
                psp_v10_0_set_psp_funcs(psp);
 +              psp->autoload_supported = false;
                break;
        case CHIP_VEGA20:
                psp_v11_0_set_psp_funcs(psp);
 +              psp->autoload_supported = false;
 +              break;
 +      case CHIP_NAVI10:
 +              psp_v11_0_set_psp_funcs(psp);
 +              psp->autoload_supported = true;
                break;
        default:
                return -EINVAL;
@@@ -189,44 -182,10 +189,44 @@@ static void psp_prep_tmr_cmd_buf(struc
        cmd->cmd.cmd_setup_tmr.buf_size = size;
  }
  
 +static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
 +                                    uint64_t pri_buf_mc, uint32_t size)
 +{
 +      cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
 +      cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
 +      cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
 +      cmd->cmd.cmd_load_toc.toc_size = size;
 +}
 +
 +/* Issue a LOAD TOC cmd to PSP to parse the TOC and calculate the TMR size needed */
 +static int psp_load_toc(struct psp_context *psp,
 +                      uint32_t *tmr_size)
 +{
 +      int ret;
 +      struct psp_gfx_cmd_resp *cmd;
 +
 +      cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 +      if (!cmd)
 +              return -ENOMEM;
 +      /* Copy toc to psp firmware private buffer */
 +      memset(psp->fw_pri_buf, 0, PSP_1_MEG);
 +      memcpy(psp->fw_pri_buf, psp->toc_start_addr, psp->toc_bin_size);
 +
 +      psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc_bin_size);
 +
 +      ret = psp_cmd_submit_buf(psp, NULL, cmd,
 +                               psp->fence_buf_mc_addr);
 +      if (!ret)
 +              *tmr_size = psp->cmd_buf_mem->resp.tmr_size;
 +      kfree(cmd);
 +      return ret;
 +}
 +
  /* Set up Trusted Memory Region */
  static int psp_tmr_init(struct psp_context *psp)
  {
        int ret;
 +      int tmr_size;
  
        /*
         * According to HW engineer, they prefer the TMR address be "naturally
         * Note: this memory need be reserved till the driver
         * uninitializes.
         */
 -      ret = amdgpu_bo_create_kernel(psp->adev, PSP_TMR_SIZE, PSP_TMR_SIZE,
 +      tmr_size = PSP_TMR_SIZE;
 +
 +      /* For ASICs that support RLC autoload, PSP parses the TOC
 +       * and calculates the total TMR size needed */
 +      if (psp->toc_start_addr &&
 +          psp->toc_bin_size &&
 +          psp->fw_pri_buf) {
 +              ret = psp_load_toc(psp, &tmr_size);
 +              if (ret) {
 +                      DRM_ERROR("Failed to load toc\n");
 +                      return ret;
 +              }
 +      }
 +
 +      ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
  
@@@ -265,10 -210,9 +265,10 @@@ static int psp_tmr_load(struct psp_cont
        if (!cmd)
                return -ENOMEM;
  
 -      psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, PSP_TMR_SIZE);
 -      DRM_INFO("reserve 0x%x from 0x%llx for PSP TMR SIZE\n",
 -                      PSP_TMR_SIZE, psp->tmr_mc_addr);
 +      psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr,
 +                           amdgpu_bo_size(psp->tmr_bo));
 +      DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
 +               amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
  
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                                 psp->fence_buf_mc_addr);
@@@ -782,24 -726,12 +782,24 @@@ static int psp_hw_start(struct psp_cont
                return ret;
        }
  
 +      ret = psp_tmr_init(psp);
 +      if (ret) {
 +              DRM_ERROR("PSP tmr init failed!\n");
 +              return ret;
 +      }
 +
        ret = psp_tmr_load(psp);
        if (ret) {
                DRM_ERROR("PSP load tmr failed!\n");
                return ret;
        }
  
 +      ret = psp_asd_init(psp);
 +      if (ret) {
 +              DRM_ERROR("PSP asd init failed!\n");
 +              return ret;
 +      }
 +
        ret = psp_asd_load(psp);
        if (ret) {
                DRM_ERROR("PSP load asd failed!\n");
@@@ -891,12 -823,6 +891,12 @@@ static int psp_get_fw_type(struct amdgp
        case AMDGPU_UCODE_ID_DMCU_INTV:
                *type = GFX_FW_TYPE_DMCU_ISR;
                break;
 +      case AMDGPU_UCODE_ID_VCN0_RAM:
 +              *type = GFX_FW_TYPE_VCN0_RAM;
 +              break;
 +      case AMDGPU_UCODE_ID_VCN1_RAM:
 +              *type = GFX_FW_TYPE_VCN1_RAM;
 +              break;
        case AMDGPU_UCODE_ID_MAXIMUM:
        default:
                return -EINVAL;
@@@ -925,45 -851,19 +925,45 @@@ static int psp_prep_load_ip_fw_cmd_buf(
        return ret;
  }
  
 +static int psp_execute_np_fw_load(struct psp_context *psp,
 +                             struct amdgpu_firmware_info *ucode)
 +{
 +      int ret = 0;
 +
 +      ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd);
 +      if (ret)
 +              return ret;
 +
 +      ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
 +                               psp->fence_buf_mc_addr);
 +
 +      return ret;
 +}
 +
  static int psp_np_fw_load(struct psp_context *psp)
  {
        int i, ret;
        struct amdgpu_firmware_info *ucode;
        struct amdgpu_device* adev = psp->adev;
  
 +      if (psp->autoload_supported) {
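 +              /* with RLC autoload the SMC image must reach PSP before any
 +               * of the other firmware images are submitted below
 +               */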
 +              ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
 +              if (!ucode->fw)
 +                      goto out;
 +
 +              ret = psp_execute_np_fw_load(psp, ucode);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +out:
        for (i = 0; i < adev->firmware.max_ucodes; i++) {
                ucode = &adev->firmware.ucode[i];
                if (!ucode->fw)
                        continue;
  
                if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
 -                  psp_smu_reload_quirk(psp))
 +                  (psp_smu_reload_quirk(psp) || psp->autoload_supported))
                        continue;
                if (amdgpu_sriov_vf(adev) &&
                   (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
                    || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G))
                        /*skip ucode loading in SRIOV VF */
                        continue;
 +              if (psp->autoload_supported &&
 +                  (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
 +                   ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
 +                      /* skip mec JT when autoload is enabled */
 +                      continue;
  
 -              ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd);
 -              if (ret)
 -                      return ret;
 -
 -              ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
 -                                       psp->fence_buf_mc_addr);
 +              ret = psp_execute_np_fw_load(psp, ucode);
                if (ret)
                        return ret;
  
 +              /* Start RLC autoload after PSP has received all the GFX firmware */
 +              if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
 +                      ret = psp_rlc_autoload(psp);
 +                      if (ret) {
 +                              DRM_ERROR("Failed to start rlc autoload\n");
 +                              return ret;
 +                      }
 +              }
  #if 0
                /* check if firmware loaded successfully */
                if (!amdgpu_psp_check_fw_loading_status(adev, i))
@@@ -1047,6 -939,18 +1047,6 @@@ static int psp_load_fw(struct amdgpu_de
                goto failed;
        }
  
 -      ret = psp_tmr_init(psp);
 -      if (ret) {
 -              DRM_ERROR("PSP tmr init failed!\n");
 -              goto failed;
 -      }
 -
 -      ret = psp_asd_init(psp);
 -      if (ret) {
 -              DRM_ERROR("PSP asd init failed!\n");
 -              goto failed;
 -      }
 -
  skip_memalloc:
        ret = psp_hw_start(psp);
        if (ret)
@@@ -1194,39 -1098,6 +1194,39 @@@ int psp_gpu_reset(struct amdgpu_device 
        return psp_mode1_reset(&adev->psp);
  }
  
 +int psp_rlc_autoload_start(struct psp_context *psp)
 +{
 +      int ret;
 +      struct psp_gfx_cmd_resp *cmd;
 +
 +      if (amdgpu_sriov_vf(psp->adev))
 +              return 0;
 +
 +      cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 +      if (!cmd)
 +              return -ENOMEM;
 +
 +      cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
 +
 +      ret = psp_cmd_submit_buf(psp, NULL, cmd,
 +                               psp->fence_buf_mc_addr);
 +      kfree(cmd);
 +      return ret;
 +}
 +
 +int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
 +                      uint64_t cmd_gpu_addr, int cmd_size)
 +{
 +      struct amdgpu_firmware_info ucode = {0};
 +
 +      ucode.ucode_id = inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
 +              AMDGPU_UCODE_ID_VCN0_RAM;
 +      ucode.mc_addr = cmd_gpu_addr;
 +      ucode.ucode_size = cmd_size;
 +
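 +      /* reuse the normal PSP firmware load path so PSP copies the DPG
 +       * command buffer into the selected VCN instance's SRAM
 +       */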
 +      return psp_execute_np_fw_load(&adev->psp, &ucode);
 +}
 +
  static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
                                        enum AMDGPU_UCODE_ID ucode_type)
  {
@@@ -24,6 -24,8 +24,8 @@@
  #include <linux/debugfs.h>
  #include <linux/list.h>
  #include <linux/module.h>
+ #include <linux/uaccess.h>
  #include "amdgpu.h"
  #include "amdgpu_ras.h"
  #include "amdgpu_atomfirmware.h"
@@@ -333,13 -335,12 +335,13 @@@ static ssize_t amdgpu_ras_debugfs_ctrl_
        case 2:
                ret = amdgpu_ras_reserve_vram(adev,
                                data.inject.address, PAGE_SIZE, &bo);
 -              /* This address might be used already on failure. In fact we can
 -               * perform an injection in such case.
 -               */
 -              if (ret)
 -                      break;
 -              data.inject.address = amdgpu_bo_gpu_offset(bo);
 +              if (ret) {
 +                      /* the address was a VRAM offset; make it absolute */
 +                      data.inject.address += adev->gmc.vram_start;
 +                      if (data.inject.address > adev->gmc.vram_end)
 +                              break;
 +              } else {
 +                      data.inject.address = amdgpu_bo_gpu_offset(bo);
 +              }
                ret = amdgpu_ras_error_inject(adev, &data.inject);
                amdgpu_ras_release_vram(adev, &bo);
                break;
@@@ -968,24 -969,40 +970,24 @@@ static int amdgpu_ras_sysfs_remove_all(
  /* sysfs end */
  
  /* debugfs begin */
 -static int amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
 +static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
  {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct drm_minor *minor = adev->ddev->primary;
 -      struct dentry *root = minor->debugfs_root, *dir;
 -      struct dentry *ent;
 -
 -      dir = debugfs_create_dir("ras", root);
 -      if (IS_ERR(dir))
 -              return -EINVAL;
  
 -      con->dir = dir;
 -
 -      ent = debugfs_create_file("ras_ctrl",
 -                      S_IWUGO | S_IRUGO, con->dir,
 -                      adev, &amdgpu_ras_debugfs_ctrl_ops);
 -      if (IS_ERR(ent)) {
 -              debugfs_remove(con->dir);
 -              return -EINVAL;
 -      }
 -
 -      con->ent = ent;
 -      return 0;
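 +      /* return values are deliberately dropped: a debugfs failure must
 +       * never fail device init, so these helpers became void
 +       */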
 +      con->dir = debugfs_create_dir("ras", minor->debugfs_root);
 +      con->ent = debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
 +                                     adev, &amdgpu_ras_debugfs_ctrl_ops);
  }
  
 -int amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
 +void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
                struct ras_fs_if *head)
  {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
 -      struct dentry *ent;
  
        if (!obj || obj->ent)
 -              return -EINVAL;
 +              return;
  
        get_obj(obj);
  
                        head->debugfs_name,
                        sizeof(obj->fs_data.debugfs_name));
  
 -      ent = debugfs_create_file(obj->fs_data.debugfs_name,
 -                      S_IWUGO | S_IRUGO, con->dir,
 -                      obj, &amdgpu_ras_debugfs_ops);
 -
 -      if (IS_ERR(ent))
 -              return -EINVAL;
 -
 -      obj->ent = ent;
 -
 -      return 0;
 +      obj->ent = debugfs_create_file(obj->fs_data.debugfs_name,
 +                                     S_IWUGO | S_IRUGO, con->dir, obj,
 +                                     &amdgpu_ras_debugfs_ops);
  }
  
 -int amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
 +void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
                struct ras_common_if *head)
  {
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
  
        if (!obj || !obj->ent)
 -              return 0;
 +              return;
  
        debugfs_remove(obj->ent);
        obj->ent = NULL;
        put_obj(obj);
 -
 -      return 0;
  }
  
 -static int amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
 +static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
  {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj, *tmp;
        debugfs_remove(con->dir);
        con->dir = NULL;
        con->ent = NULL;
 -
 -      return 0;
  }
  /* debugfs end */
  
@@@ -28,8 -28,9 +28,9 @@@
   */
  #include <linux/seq_file.h>
  #include <linux/slab.h>
+ #include <linux/uaccess.h>
  #include <linux/debugfs.h>
- #include <drm/drmP.h>
  #include <drm/amdgpu_drm.h>
  #include "amdgpu.h"
  #include "atom.h"
@@@ -281,16 -282,6 +282,16 @@@ int amdgpu_ring_init(struct amdgpu_devi
                return r;
        }
  
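 +      /* trailing fence writeback slot: the CP writes it to signal that
 +       * a mid-command-buffer preemption request has completed
 +       */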
 +      r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
 +      if (r) {
 +              dev_err(adev->dev,
 +                      "(%d) ring trail_fence_offs wb alloc failed\n", r);
 +              return r;
 +      }
 +      ring->trail_fence_gpu_addr =
 +              adev->wb.gpu_addr + (ring->trail_fence_offs * 4);
 +      ring->trail_fence_cpu_addr = &adev->wb.wb[ring->trail_fence_offs];
 +
        r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
        if (r) {
                dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
@@@ -409,7 -400,7 +410,7 @@@ bool amdgpu_ring_soft_recovery(struct a
  {
        ktime_t deadline = ktime_add_us(ktime_get(), 10000);
  
 -      if (!ring->funcs->soft_recovery || !fence)
 +      if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
                return false;
  
        atomic_inc(&ring->adev->gpu_reset_counter);
   * OTHER DEALINGS IN THE SOFTWARE.
   *
   */
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_sdma.h"
  
 +#define AMDGPU_CSA_SDMA_SIZE 64
 +/* The SDMA CSA resides in the 3rd page of the CSA */
 +#define AMDGPU_CSA_SDMA_OFFSET (4096 * 2)
 +
  /*
   * GPU SDMA IP block helper functions.
   */
@@@ -60,26 -56,3 +60,26 @@@ int amdgpu_sdma_get_index_from_ring(str
  
        return -EINVAL;
  }
 +
 +uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
 +                                   unsigned vmid)
 +{
 +      struct amdgpu_device *adev = ring->adev;
 +      uint64_t csa_mc_addr;
 +      uint32_t index = 0;
 +      int r;
 +
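 +      /* a zero CSA address disables MCBP for this submission; kernel
 +       * VMID 0 never gets preempted mid command buffer
 +       */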
 +      if (vmid == 0 || !amdgpu_mcbp)
 +              return 0;
 +
 +      r = amdgpu_sdma_get_index_from_ring(ring, &index);
 +
 +      if (r || index > 31)
 +              csa_mc_addr = 0;
 +      else
 +              csa_mc_addr = amdgpu_csa_vaddr(adev) +
 +                      AMDGPU_CSA_SDMA_OFFSET +
 +                      index * AMDGPU_CSA_SDMA_SIZE;
 +
 +      return csa_mc_addr;
 +}
   *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
   *    Dave Airlie
   */
+ #include <linux/dma-mapping.h>
+ #include <linux/iommu.h>
+ #include <linux/hmm.h>
+ #include <linux/pagemap.h>
+ #include <linux/sched/task.h>
+ #include <linux/seq_file.h>
+ #include <linux/slab.h>
+ #include <linux/swap.h>
+ #include <linux/swiotlb.h>
  #include <drm/ttm/ttm_bo_api.h>
  #include <drm/ttm/ttm_bo_driver.h>
  #include <drm/ttm/ttm_placement.h>
  #include <drm/ttm/ttm_module.h>
  #include <drm/ttm/ttm_page_alloc.h>
- #include <drm/drmP.h>
+ #include <drm/drm_debugfs.h>
  #include <drm/amdgpu_drm.h>
- #include <linux/seq_file.h>
- #include <linux/slab.h>
- #include <linux/swiotlb.h>
- #include <linux/swap.h>
- #include <linux/pagemap.h>
- #include <linux/debugfs.h>
- #include <linux/iommu.h>
- #include <linux/hmm.h>
  #include "amdgpu.h"
  #include "amdgpu_object.h"
  #include "amdgpu_trace.h"
@@@ -711,7 -716,8 +716,7 @@@ struct amdgpu_ttm_tt 
        struct task_struct      *usertask;
        uint32_t                userflags;
  #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 -      struct hmm_range        *ranges;
 -      int                     nr_ranges;
 +      struct hmm_range        *range;
  #endif
  };
  
   */
  #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
  
 -/* Support Userptr pages cross max 16 vmas */
 -#define MAX_NR_VMAS   (16)
 +#define MAX_RETRY_HMM_RANGE_FAULT     16
  
  int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
  {
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        struct mm_struct *mm = gtt->usertask->mm;
        unsigned long start = gtt->userptr;
 -      unsigned long end = start + ttm->num_pages * PAGE_SIZE;
 -      struct vm_area_struct *vma = NULL, *vmas[MAX_NR_VMAS];
 -      struct hmm_range *ranges;
 -      unsigned long nr_pages, i;
 -      uint64_t *pfns, f;
 +      struct vm_area_struct *vma;
 +      struct hmm_range *range;
 +      unsigned long i;
 +      uint64_t *pfns;
 +      int retry = 0;
        int r = 0;
  
        if (!mm) /* Happens during process shutdown */
                return -ESRCH;
  
 -      down_read(&mm->mmap_sem);
 -
 -      /* user pages may cross multiple VMAs */
 -      gtt->nr_ranges = 0;
 -      do {
 -              unsigned long vm_start;
 -
 -              if (gtt->nr_ranges >= MAX_NR_VMAS) {
 -                      DRM_ERROR("Too many VMAs in userptr range\n");
 -                      r = -EFAULT;
 -                      goto out;
 -              }
 -
 -              vm_start = vma ? vma->vm_end : start;
 -              vma = find_vma(mm, vm_start);
 -              if (unlikely(!vma || vm_start < vma->vm_start)) {
 -                      r = -EFAULT;
 -                      goto out;
 -              }
 -              vmas[gtt->nr_ranges++] = vma;
 -      } while (end > vma->vm_end);
 -
 -      DRM_DEBUG_DRIVER("0x%lx nr_ranges %d pages 0x%lx\n",
 -              start, gtt->nr_ranges, ttm->num_pages);
 -
 +      vma = find_vma(mm, start);
 +      if (unlikely(!vma || start < vma->vm_start)) {
 +              r = -EFAULT;
 +              goto out;
 +      }
        if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
 -              vmas[0]->vm_file)) {
 +              vma->vm_file)) {
                r = -EPERM;
                goto out;
        }
  
 -      ranges = kvmalloc_array(gtt->nr_ranges, sizeof(*ranges), GFP_KERNEL);
 -      if (unlikely(!ranges)) {
 +      range = kzalloc(sizeof(*range), GFP_KERNEL);
 +      if (unlikely(!range)) {
                r = -ENOMEM;
                goto out;
        }
                goto out_free_ranges;
        }
  
 -      for (i = 0; i < gtt->nr_ranges; i++)
 -              amdgpu_hmm_init_range(&ranges[i]);
 +      amdgpu_hmm_init_range(range);
 +      range->default_flags = range->flags[HMM_PFN_VALID];
 +      range->default_flags |= amdgpu_ttm_tt_is_readonly(ttm) ?
 +                              0 : range->flags[HMM_PFN_WRITE];
 +      range->pfn_flags_mask = 0;
 +      range->pfns = pfns;
 +      hmm_range_register(range, mm, start,
 +                         start + ttm->num_pages * PAGE_SIZE, PAGE_SHIFT);
  
 -      f = ranges[0].flags[HMM_PFN_VALID];
 -      f |= amdgpu_ttm_tt_is_readonly(ttm) ?
 -                              0 : ranges[0].flags[HMM_PFN_WRITE];
 -      memset64(pfns, f, ttm->num_pages);
 +retry:
 +      /*
 +       * Just wait for range to be valid, safe to ignore return value as we
 +       * will use the return value of hmm_range_fault() below under the
 +       * mmap_sem to ascertain the validity of the range.
 +       */
 +      hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT);
  
 -      for (nr_pages = 0, i = 0; i < gtt->nr_ranges; i++) {
 -              ranges[i].vma = vmas[i];
 -              ranges[i].start = max(start, vmas[i]->vm_start);
 -              ranges[i].end = min(end, vmas[i]->vm_end);
 -              ranges[i].pfns = pfns + nr_pages;
 -              nr_pages += (ranges[i].end - ranges[i].start) / PAGE_SIZE;
 +      down_read(&mm->mmap_sem);
  
 -              r = hmm_vma_fault(&ranges[i], true);
 -              if (unlikely(r))
 -                      break;
 -      }
 -      if (unlikely(r)) {
 -              while (i--)
 -                      hmm_vma_range_done(&ranges[i]);
 +      r = hmm_range_fault(range, true);
 +      if (unlikely(r < 0)) {
 +              if (likely(r == -EAGAIN)) {
 +                      /*
 +                       * -EAGAIN means hmm_range_fault() dropped the
 +                       * mmap_sem for us, so it is safe to retry
 +                       */
 +                      if (retry++ < MAX_RETRY_HMM_RANGE_FAULT)
 +                              goto retry;
 +                      else
 +                              pr_err("Retried hmm fault too many times\n");
 +              }
  
 -              goto out_free_pfns;
 +              goto out_up_read;
        }
  
        up_read(&mm->mmap_sem);
  
        for (i = 0; i < ttm->num_pages; i++) {
 -              pages[i] = hmm_pfn_to_page(&ranges[0], pfns[i]);
 -              if (!pages[i]) {
 +              pages[i] = hmm_device_entry_to_page(range, pfns[i]);
 +              if (unlikely(!pages[i])) {
                        pr_err("Page fault failed for pfn[%lu] = 0x%llx\n",
                               i, pfns[i]);
 -                      goto out_invalid_pfn;
 +                      r = -ENOMEM;
 +
 +                      goto out_free_pfns;
                }
        }
 -      gtt->ranges = ranges;
 +
 +      gtt->range = range;
  
        return 0;
  
 +out_up_read:
 +      if (likely(r != -EAGAIN))
 +              up_read(&mm->mmap_sem);
  out_free_pfns:
 +      hmm_range_unregister(range);
        kvfree(pfns);
  out_free_ranges:
 -      kvfree(ranges);
 +      kfree(range);
  out:
 -      up_read(&mm->mmap_sem);
 -
        return r;
 -
 -out_invalid_pfn:
 -      for (i = 0; i < gtt->nr_ranges; i++)
 -              hmm_vma_range_done(&ranges[i]);
 -      kvfree(pfns);
 -      kvfree(ranges);
 -      return -ENOMEM;
  }
  
  /**
@@@ -837,23 -858,23 +842,23 @@@ bool amdgpu_ttm_tt_get_user_pages_done(
  {
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        bool r = false;
 -      int i;
  
        if (!gtt || !gtt->userptr)
                return false;
  
 -      DRM_DEBUG_DRIVER("user_pages_done 0x%llx nr_ranges %d pages 0x%lx\n",
 -              gtt->userptr, gtt->nr_ranges, ttm->num_pages);
 +      DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
 +              gtt->userptr, ttm->num_pages);
  
 -      WARN_ONCE(!gtt->ranges || !gtt->ranges[0].pfns,
 +      WARN_ONCE(!gtt->range || !gtt->range->pfns,
                "No user pages to check\n");
  
 -      if (gtt->ranges) {
 -              for (i = 0; i < gtt->nr_ranges; i++)
 -                      r |= hmm_vma_range_done(&gtt->ranges[i]);
 -              kvfree(gtt->ranges[0].pfns);
 -              kvfree(gtt->ranges);
 -              gtt->ranges = NULL;
 +      if (gtt->range) {
 +              r = hmm_range_valid(gtt->range);
 +              hmm_range_unregister(gtt->range);
 +
 +              kvfree(gtt->range->pfns);
 +              kfree(gtt->range);
 +              gtt->range = NULL;
        }
  
        return r;
@@@ -937,9 -958,9 +942,9 @@@ static void amdgpu_ttm_tt_unpin_userptr
        sg_free_table(ttm->sg);
  
  #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 -      if (gtt->ranges &&
 -          ttm->pages[0] == hmm_pfn_to_page(&gtt->ranges[0],
 -                                           gtt->ranges[0].pfns[0]))
 +      if (gtt->range &&
 +          ttm->pages[0] == hmm_device_entry_to_page(gtt->range,
 +                                                    gtt->range->pfns[0]))
                WARN_ONCE(1, "Missing get_user_page_done\n");
  #endif
  }
@@@ -962,8 -983,8 +967,8 @@@ int amdgpu_ttm_gart_bind(struct amdgpu_
                        goto gart_bind_fail;
  
                /* Patch mtype of the second part BO */
 -              flags &=  ~AMDGPU_PTE_MTYPE_MASK;
 -              flags |= AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_NC);
 +              flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
 +              flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
  
                r = amdgpu_gart_bind(adev,
                                gtt->offset + (page_idx << PAGE_SHIFT),
@@@ -24,7 -24,7 +24,7 @@@
  #include <linux/firmware.h>
  #include <linux/slab.h>
  #include <linux/module.h>
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_ucode.h"
  
@@@ -77,14 -77,6 +77,14 @@@ void amdgpu_ucode_print_smc_hdr(const s
                        container_of(hdr, struct smc_firmware_header_v1_0, header);
  
                DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr->ucode_start_addr));
 +      } else if (version_major == 2) {
 +              const struct smc_firmware_header_v1_0 *v1_hdr =
 +                      container_of(hdr, struct smc_firmware_header_v1_0, header);
 +              const struct smc_firmware_header_v2_0 *v2_hdr =
 +                      container_of(v1_hdr, struct smc_firmware_header_v2_0, v1_0);
 +
 +              DRM_INFO("ppt_offset_bytes: %u\n", le32_to_cpu(v2_hdr->ppt_offset_bytes));
 +              DRM_INFO("ppt_size_bytes: %u\n", le32_to_cpu(v2_hdr->ppt_size_bytes));
        } else {
                DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
        }
@@@ -235,40 -227,6 +235,40 @@@ void amdgpu_ucode_print_sdma_hdr(const 
        }
  }
  
 +void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr)
 +{
 +      uint16_t version_major = le16_to_cpu(hdr->header_version_major);
 +      uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
 +
 +      DRM_DEBUG("PSP\n");
 +      amdgpu_ucode_print_common_hdr(hdr);
 +
 +      if (version_major == 1) {
 +              const struct psp_firmware_header_v1_0 *psp_hdr =
 +                      container_of(hdr, struct psp_firmware_header_v1_0, header);
 +
 +              DRM_DEBUG("ucode_feature_version: %u\n",
 +                        le32_to_cpu(psp_hdr->ucode_feature_version));
 +              DRM_DEBUG("sos_offset_bytes: %u\n",
 +                        le32_to_cpu(psp_hdr->sos_offset_bytes));
 +              DRM_DEBUG("sos_size_bytes: %u\n",
 +                        le32_to_cpu(psp_hdr->sos_size_bytes));
 +              if (version_minor == 1) {
 +                      const struct psp_firmware_header_v1_1 *psp_hdr_v1_1 =
 +                              container_of(psp_hdr, struct psp_firmware_header_v1_1, v1_0);
 +                      DRM_DEBUG("toc_header_version: %u\n",
 +                                le32_to_cpu(psp_hdr_v1_1->toc_header_version));
 +                      DRM_DEBUG("toc_offset_bytes: %u\n",
 +                                le32_to_cpu(psp_hdr_v1_1->toc_offset_bytes));
 +                      DRM_DEBUG("toc_size_bytes: %u\n",
 +                                le32_to_cpu(psp_hdr_v1_1->toc_size_bytes));
 +              }
 +      } else {
 +              DRM_ERROR("Unknown PSP ucode version: %u.%u\n",
 +                        version_major, version_minor);
 +      }
 +}
 +
  void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr)
  {
        uint16_t version_major = le16_to_cpu(hdr->header_version_major);
@@@ -344,7 -302,6 +344,7 @@@ amdgpu_ucode_get_load_type(struct amdgp
        case CHIP_RAVEN:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
 +      case CHIP_NAVI10:
                if (!load_type)
                        return AMDGPU_FW_LOAD_DIRECT;
                else
@@@ -26,7 -26,8 +26,8 @@@
  
  #include <linux/firmware.h>
  #include <linux/module.h>
- #include <drm/drmP.h>
+ #include <linux/pci.h>
  #include <drm/drm.h>
  
  #include "amdgpu.h"
  #define FIRMWARE_RAVEN                "amdgpu/raven_vcn.bin"
  #define FIRMWARE_PICASSO      "amdgpu/picasso_vcn.bin"
  #define FIRMWARE_RAVEN2               "amdgpu/raven2_vcn.bin"
 +#define FIRMWARE_NAVI10       "amdgpu/navi10_vcn.bin"
  
  MODULE_FIRMWARE(FIRMWARE_RAVEN);
  MODULE_FIRMWARE(FIRMWARE_PICASSO);
  MODULE_FIRMWARE(FIRMWARE_RAVEN2);
 +MODULE_FIRMWARE(FIRMWARE_NAVI10);
  
  static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
  
@@@ -73,12 -72,6 +74,12 @@@ int amdgpu_vcn_sw_init(struct amdgpu_de
                else
                        fw_name = FIRMWARE_RAVEN;
                break;
 +      case CHIP_NAVI10:
 +              fw_name = FIRMWARE_NAVI10;
 +              if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 +                  (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 +                      adev->vcn.indirect_sram = true;
 +              break;
        default:
                return -EINVAL;
        }
                return r;
        }
  
 +      if (adev->vcn.indirect_sram) {
 +              r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
 +                          AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.dpg_sram_bo,
 +                          &adev->vcn.dpg_sram_gpu_addr, &adev->vcn.dpg_sram_cpu_addr);
 +              if (r) {
 +                      dev_err(adev->dev, "(%d) failed to allocate DPG bo\n", r);
 +                      return r;
 +              }
 +      }
 +
        return 0;
  }
  
@@@ -159,12 -142,6 +160,12 @@@ int amdgpu_vcn_sw_fini(struct amdgpu_de
  
        kvfree(adev->vcn.saved_bo);
  
 +      if (adev->vcn.indirect_sram) {
 +              amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
 +                            &adev->vcn.dpg_sram_gpu_addr,
 +                            (void **)&adev->vcn.dpg_sram_cpu_addr);
 +      }
 +
        amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
                              &adev->vcn.gpu_addr,
                              (void **)&adev->vcn.cpu_addr);
@@@ -268,7 -245,7 +269,7 @@@ static void amdgpu_vcn_idle_work_handle
  
        if (fences == 0) {
                amdgpu_gfx_off_ctrl(adev, true);
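 +              /* Navi1x manages VCN power through the powergating callback
 +               * below instead of the legacy DPM helper
 +               */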
 -              if (adev->pm.dpm_enabled)
 +              if (adev->asic_type < CHIP_NAVI10 && adev->pm.dpm_enabled)
                        amdgpu_dpm_enable_uvd(adev, false);
                else
                        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
@@@ -285,7 -262,7 +286,7 @@@ void amdgpu_vcn_ring_begin_use(struct a
  
        if (set_clocks) {
                amdgpu_gfx_off_ctrl(adev, false);
 -              if (adev->pm.dpm_enabled)
 +              if (adev->asic_type < CHIP_NAVI10 && adev->pm.dpm_enabled)
                        amdgpu_dpm_enable_uvd(adev, true);
                else
                        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
@@@ -331,18 -308,20 +332,18 @@@ int amdgpu_vcn_dec_ring_test_ring(struc
        unsigned i;
        int r;
  
 -      WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
 +      WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;
 -
 -      amdgpu_ring_write(ring,
 -              PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0));
 +      amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
 -              tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
 +              tmp = RREG32(adev->vcn.external.scratch9);
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -368,14 -347,14 +369,14 @@@ static int amdgpu_vcn_dec_send_msg(stru
  
        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);
 -      ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
 +      ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
        ib->ptr[1] = addr;
 -      ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
 +      ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
        ib->ptr[3] = addr >> 32;
 -      ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
 +      ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
        ib->ptr[5] = 0;
        for (i = 6; i < 16; i += 2) {
 -              ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
 +              ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
                ib->ptr[i+1] = 0;
        }
        ib->length_dw = 16;
@@@ -506,7 -485,7 +507,7 @@@ int amdgpu_vcn_enc_ring_test_ring(struc
        for (i = 0; i < adev->usec_timeout; i++) {
                if (amdgpu_ring_get_rptr(ring) != rptr)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -650,20 -629,22 +651,20 @@@ int amdgpu_vcn_jpeg_ring_test_ring(stru
        unsigned i;
        int r;
  
 -      WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
 +      WREG32(adev->vcn.external.jpeg_pitch, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
 -
        if (r)
                return r;
  
 -      amdgpu_ring_write(ring,
 -              PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, 0));
 +      amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.jpeg_pitch, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
  
        for (i = 0; i < adev->usec_timeout; i++) {
 -              tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
 +              tmp = RREG32(adev->vcn.external.jpeg_pitch);
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -688,7 -669,7 +689,7 @@@ static int amdgpu_vcn_jpeg_set_reg(stru
  
        ib = &job->ibs[0];
  
 -      ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, PACKETJ_TYPE0);
 +      ib->ptr[0] = PACKETJ(adev->vcn.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
        ib->ptr[1] = 0xDEADBEEF;
        for (i = 2; i < 16; i += 2) {
                ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
@@@ -734,10 -715,10 +735,10 @@@ int amdgpu_vcn_jpeg_ring_test_ib(struc
        }
  
        for (i = 0; i < adev->usec_timeout; i++) {
 -              tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
 +              tmp = RREG32(adev->vcn.external.jpeg_pitch);
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -28,7 -28,7 +28,7 @@@
  #include <linux/dma-fence-array.h>
  #include <linux/interval_tree_generic.h>
  #include <linux/idr.h>
- #include <drm/drmP.h>
  #include <drm/amdgpu_drm.h>
  #include "amdgpu.h"
  #include "amdgpu_trace.h"
@@@ -1574,22 -1574,12 +1574,22 @@@ static int amdgpu_vm_bo_split_mapping(s
        flags &= ~AMDGPU_PTE_EXECUTABLE;
        flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
  
 -      flags &= ~AMDGPU_PTE_MTYPE_MASK;
 -      flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
 +      if (adev->asic_type == CHIP_NAVI10) {
 +              flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
 +              flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
 +      } else {
 +              flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
 +              flags |= (mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK);
 +      }
  
        if ((mapping->flags & AMDGPU_PTE_PRT) &&
            (adev->asic_type >= CHIP_VEGA10)) {
                flags |= AMDGPU_PTE_PRT;
 +              if (adev->asic_type >= CHIP_NAVI10) {
 +                      flags |= AMDGPU_PTE_SNOOPED;
 +                      flags |= AMDGPU_PTE_LOG;
 +                      flags |= AMDGPU_PTE_SYSTEM;
 +              }
                flags &= ~AMDGPU_PTE_VALID;
        }
  
@@@ -22,7 -22,6 +22,6 @@@
   * Authors: Christian König
   */
  
- #include <drm/drmP.h>
  #include "amdgpu.h"
  
  struct amdgpu_vram_mgr {
@@@ -276,7 -275,7 +275,7 @@@ static int amdgpu_vram_mgr_new(struct t
        struct drm_mm_node *nodes;
        enum drm_mm_insert_mode mode;
        unsigned long lpfn, num_nodes, pages_per_node, pages_left;
 -      uint64_t usage = 0, vis_usage = 0;
 +      uint64_t vis_usage = 0, mem_bytes;
        unsigned i;
        int r;
  
        if (!lpfn)
                lpfn = man->size;
  
 -      if (place->flags & TTM_PL_FLAG_CONTIGUOUS ||
 -          amdgpu_vram_page_split == -1) {
 +      /* bail out quickly if there's likely not enough VRAM for this BO */
 +      mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
 +      if (atomic64_add_return(mem_bytes, &mgr->usage) > adev->gmc.mc_vram_size) {
 +              atomic64_sub(mem_bytes, &mgr->usage);
 +              mem->mm_node = NULL;
 +              return 0;
 +      }
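 +      /* The atomic add above is an optimistic reservation; every failure
 +       * path below must roll it back with atomic64_sub(). */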
 +
 +      if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
                pages_per_node = ~0ul;
                num_nodes = 1;
        } else {
 -              pages_per_node = max((uint32_t)amdgpu_vram_page_split,
 -                                   mem->page_alignment);
 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 +              pages_per_node = HPAGE_PMD_NR;
 +#else
 +              /* default to 2MB */
 +              pages_per_node = (2UL << (20UL - PAGE_SHIFT));
 +#endif
 +              pages_per_node = max((uint32_t)pages_per_node, mem->page_alignment);
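 +              /* Grouping at huge-page granularity (the apparent intent of
 +               * HPAGE_PMD_NR above) lets the GPU map this BO with 2MB
 +               * page-table entries where alignment allows. */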
                num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
        }
  
 -      nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
 +      nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
                               GFP_KERNEL | __GFP_ZERO);
 -      if (!nodes)
 +      if (!nodes) {
 +              atomic64_sub(mem_bytes, &mgr->usage);
                return -ENOMEM;
 +      }
  
        mode = DRM_MM_INSERT_BEST;
        if (place->flags & TTM_PL_FLAG_TOPDOWN)
                if (unlikely(r))
                        break;
  
 -              usage += nodes[i].size << PAGE_SHIFT;
                vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
                amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
                pages_left -= pages;
                if (unlikely(r))
                        goto error;
  
 -              usage += nodes[i].size << PAGE_SHIFT;
                vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
                amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
                pages_left -= pages;
        }
        spin_unlock(&mgr->lock);
  
 -      atomic64_add(usage, &mgr->usage);
        atomic64_add(vis_usage, &mgr->vis_usage);
  
        mem->mm_node = nodes;
@@@ -366,7 -354,6 +365,7 @@@ error
        while (i--)
                drm_mm_remove_node(&nodes[i]);
        spin_unlock(&mgr->lock);
 +      atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
  
        kvfree(nodes);
        return r == -ENOSPC ? 0 : r;
@@@ -20,7 -20,9 +20,9 @@@
   * OTHER DEALINGS IN THE SOFTWARE.
   *
   */
- #include <drm/drmP.h>
+ #include <drm/drm_vblank.h>
  #include "amdgpu.h"
  #include "amdgpu_pm.h"
  #include "amdgpu_i2c.h"
@@@ -455,7 -457,6 +457,7 @@@ static int dce_virtual_hw_init(void *ha
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
 +      case CHIP_NAVI10:
                break;
        default:
                DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
@@@ -21,6 -21,8 +21,8 @@@
   *
   */
  #include <linux/firmware.h>
+ #include <linux/module.h>
  #include "amdgpu.h"
  #include "amdgpu_ih.h"
  #include "amdgpu_gfx.h"
@@@ -1812,7 -1814,7 +1814,7 @@@ static int gfx_v6_0_ring_test_ring(stru
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -3113,7 -3115,7 +3115,7 @@@ static int gfx_v6_0_sw_init(void *handl
                ring->ring_obj = NULL;
                sprintf(ring->name, "gfx");
                r = amdgpu_ring_init(adev, ring, 1024,
 -                                   &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
 +                                   &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
                if (r)
                        return r;
        }
@@@ -3348,7 -3350,7 +3350,7 @@@ static int gfx_v6_0_set_eop_interrupt_s
                                            enum amdgpu_interrupt_state state)
  {
        switch (type) {
 -      case AMDGPU_CP_IRQ_GFX_EOP:
 +      case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
                gfx_v6_0_set_gfx_eop_interrupt_state(adev, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
   * OTHER DEALINGS IN THE SOFTWARE.
   *
   */
  #include <linux/firmware.h>
- #include <drm/drmP.h>
+ #include <linux/module.h>
  #include "amdgpu.h"
  #include "amdgpu_ih.h"
  #include "amdgpu_gfx.h"
@@@ -2080,7 -2082,7 +2082,7 @@@ static int gfx_v7_0_ring_test_ring(stru
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;
@@@ -4460,7 -4462,7 +4462,7 @@@ static int gfx_v7_0_sw_init(void *handl
                ring->ring_obj = NULL;
                sprintf(ring->name, "gfx");
                r = amdgpu_ring_init(adev, ring, 1024,
 -                                   &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
 +                                   &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
                if (r)
                        return r;
        }
@@@ -4797,7 -4799,7 +4799,7 @@@ static int gfx_v7_0_set_eop_interrupt_s
                                            enum amdgpu_interrupt_state state)
  {
        switch (type) {
 -      case AMDGPU_CP_IRQ_GFX_EOP:
 +      case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
                gfx_v7_0_set_gfx_eop_interrupt_state(adev, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
   * OTHER DEALINGS IN THE SOFTWARE.
   *
   */
+ #include <linux/delay.h>
  #include <linux/kernel.h>
  #include <linux/firmware.h>
- #include <drm/drmP.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
  #include "amdgpu.h"
  #include "amdgpu_gfx.h"
  #include "vi.h"
@@@ -855,7 -859,7 +859,7 @@@ static int gfx_v8_0_ring_test_ring(stru
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -2005,7 -2009,7 +2009,7 @@@ static int gfx_v8_0_sw_init(void *handl
                }
  
                r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
 -                                   AMDGPU_CP_IRQ_GFX_EOP);
 +                                   AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
                if (r)
                        return r;
        }
                return r;
  
        /* create MQD for all compute queues as well as KIQ for SRIOV case */
 -      r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation));
 +      r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation));
        if (r)
                return r;
  
@@@ -2065,7 -2069,7 +2069,7 @@@ static int gfx_v8_0_sw_fini(void *handl
        for (i = 0; i < adev->gfx.num_compute_rings; i++)
                amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
  
 -      amdgpu_gfx_compute_mqd_sw_fini(adev);
 +      amdgpu_gfx_mqd_sw_fini(adev);
        amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
        amdgpu_gfx_kiq_fini(adev);
  
@@@ -6213,7 -6217,7 +6217,7 @@@ static void gfx_v8_0_pipe_reserve_resou
        struct amdgpu_ring *iring;
  
        mutex_lock(&adev->gfx.pipe_reserve_mutex);
 -      pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
 +      pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
        if (acquire)
                set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
        else
                /* Lower all pipes without a current reservation */
                for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
                        iring = &adev->gfx.gfx_ring[i];
 -                      pipe = amdgpu_gfx_queue_to_bit(adev,
 -                                                     iring->me,
 -                                                     iring->pipe,
 -                                                     0);
 +                      pipe = amdgpu_gfx_mec_queue_to_bit(adev,
 +                                                         iring->me,
 +                                                         iring->pipe,
 +                                                         0);
                        reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
                        gfx_v8_0_ring_set_pipe_percent(iring, reserve);
                }
  
                for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
                        iring = &adev->gfx.compute_ring[i];
 -                      pipe = amdgpu_gfx_queue_to_bit(adev,
 -                                                     iring->me,
 -                                                     iring->pipe,
 -                                                     0);
 +                      pipe = amdgpu_gfx_mec_queue_to_bit(adev,
 +                                                         iring->me,
 +                                                         iring->pipe,
 +                                                         0);
                        reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
                        gfx_v8_0_ring_set_pipe_percent(iring, reserve);
                }
@@@ -6533,7 -6537,7 +6537,7 @@@ static int gfx_v8_0_set_eop_interrupt_s
                                            enum amdgpu_interrupt_state state)
  {
        switch (type) {
 -      case AMDGPU_CP_IRQ_GFX_EOP:
 +      case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
                gfx_v8_0_set_gfx_eop_interrupt_state(adev, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
   * OTHER DEALINGS IN THE SOFTWARE.
   *
   */
+ #include <linux/delay.h>
  #include <linux/kernel.h>
  #include <linux/firmware.h>
- #include <drm/drmP.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
  #include "amdgpu.h"
  #include "amdgpu_gfx.h"
  #include "soc15.h"
@@@ -305,7 -309,6 +309,7 @@@ static int gfx_v9_0_get_cu_info(struct 
  static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
  static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
  static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
 +static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
  
  static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
  {
@@@ -423,7 -426,7 +427,7 @@@ static int gfx_v9_0_ring_test_ring(stru
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -1721,7 -1724,7 +1725,7 @@@ static int gfx_v9_0_sw_init(void *handl
                ring->use_doorbell = true;
                ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
                r = amdgpu_ring_init(adev, ring, 1024,
 -                                   &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
 +                                   &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
                if (r)
                        return r;
        }
                return r;
  
        /* create MQD for all compute queues as well as KIQ for SRIOV case */
 -      r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
 +      r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
        if (r)
                return r;
  
@@@ -1799,7 -1802,7 +1803,7 @@@ static int gfx_v9_0_sw_fini(void *handl
        for (i = 0; i < adev->gfx.num_compute_rings; i++)
                amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
  
 -      amdgpu_gfx_compute_mqd_sw_fini(adev);
 +      amdgpu_gfx_mqd_sw_fini(adev);
        amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
        amdgpu_gfx_kiq_fini(adev);
  
@@@ -3597,89 -3600,45 +3601,89 @@@ static const struct soc15_reg_entry sgp
  };
  
  static const struct soc15_reg_entry sec_ded_counter_registers[] = {
 -   { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED) },
 -   { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO) },
 -   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2) },
 -   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2) },
 -   { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT) },
 -   { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2) },
 -   { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
 +   { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
 +   { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
 +   { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1},
 +   { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, 1},
 +   { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, 1},
 +   { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1},
 +   { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1},
 +   { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1},
 +   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1},
 +   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1},
 +   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1},
 +   { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1},
 +   { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6},
 +   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16},
 +   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16},
 +   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16},
 +   { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
 +   { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
 +   { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
 +   { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
 +   { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
 +   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
 +   { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16},
 +   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1},
 +   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1},
 +   { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32},
 +   { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32},
 +   { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72},
 +   { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
 +   { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
 +   { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
  };
  
 +static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
 +{
 +      struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
 +      int i, r;
 +
 +      r = amdgpu_ring_alloc(ring, 7);
 +      if (r) {
 +              DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
 +                      ring->name, r);
 +              return r;
 +      }
 +
 +      WREG32_SOC15(GC, 0, mmGDS_VMID0_BASE, 0x00000000);
 +      WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, adev->gds.gds_size);
 +
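 +      /* Zero-fill the whole VMID0 GDS aperture using CP DMA_DATA: DST_SEL(1)
 +       * targets GDS and SRC_SEL(2) sources the immediate data dword (zero
 +       * here), priming the GDS ECC bits before the counters are read. */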
 +      amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
 +      amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
 +                              PACKET3_DMA_DATA_DST_SEL(1) |
 +                              PACKET3_DMA_DATA_SRC_SEL(2) |
 +                              PACKET3_DMA_DATA_ENGINE(0)));
 +      amdgpu_ring_write(ring, 0);
 +      amdgpu_ring_write(ring, 0);
 +      amdgpu_ring_write(ring, 0);
 +      amdgpu_ring_write(ring, 0);
 +      amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
 +                              adev->gds.gds_size);
 +
 +      amdgpu_ring_commit(ring);
 +
 +      for (i = 0; i < adev->usec_timeout; i++) {
 +              if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
 +                      break;
 +              udelay(1);
 +      }
 +
 +      if (i >= adev->usec_timeout)
 +              r = -ETIMEDOUT;
 +
 +      WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, 0x00000000);
 +
 +      return r;
 +}
 +
  static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
  {
        struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;
 -      int r, i, j;
 +      int r, i, j, k;
        unsigned total_size, vgpr_offset, sgpr_offset;
        u64 gpu_addr;
  
  
        /* read back registers to clear the counters */
        mutex_lock(&adev->grbm_idx_mutex);
 -      for (j = 0; j < 16; j++) {
 -              gfx_v9_0_select_se_sh(adev, 0x01, 0x0, j);
 -              for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
 -                      RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
 -              gfx_v9_0_select_se_sh(adev, 0x02, 0x0, j);
 -              for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
 -                      RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
 -              gfx_v9_0_select_se_sh(adev, 0x03, 0x0, j);
 -              for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
 -                      RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
 -              gfx_v9_0_select_se_sh(adev, 0x04, 0x0, j);
 -              for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
 -                      RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
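 +      /* Each entry's se_num/instance fields bound the walk below: the
 +       * clear-on-read EDC counters are read once per shader engine and
 +       * per instance. */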
 +      for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
 +              for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
 +                      for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
 +                              gfx_v9_0_select_se_sh(adev, j, 0x0, k);
 +                              RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
 +                      }
 +              }
        }
        WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
        mutex_unlock(&adev->grbm_idx_mutex);
@@@ -3850,10 -3815,6 +3854,10 @@@ static int gfx_v9_0_ecc_late_init(void 
                return 0;
        }
  
 +      r = gfx_v9_0_do_edc_gds_workarounds(adev);
 +      if (r)
 +              return r;
 +
        /* requires IBs so do in late init after IB pool is initialized */
        r = gfx_v9_0_do_edc_gpr_workarounds(adev);
        if (r)
        if (r)
                goto interrupt;
  
 -      r = amdgpu_ras_debugfs_create(adev, &fs_info);
 -      if (r)
 -              goto debugfs;
 +      amdgpu_ras_debugfs_create(adev, &fs_info);
  
        r = amdgpu_ras_sysfs_create(adev, &fs_info);
        if (r)
@@@ -3918,6 -3881,7 +3922,6 @@@ irq
        amdgpu_ras_sysfs_remove(adev, *ras_if);
  sysfs:
        amdgpu_ras_debugfs_remove(adev, *ras_if);
 -debugfs:
        amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
  interrupt:
        amdgpu_ras_feature_enable(adev, *ras_if, 0);
@@@ -4578,7 -4542,7 +4582,7 @@@ static void gfx_v9_0_pipe_reserve_resou
        struct amdgpu_ring *iring;
  
        mutex_lock(&adev->gfx.pipe_reserve_mutex);
 -      pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
 +      pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
        if (acquire)
                set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
        else
                /* Lower all pipes without a current reservation */
                for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
                        iring = &adev->gfx.gfx_ring[i];
 -                      pipe = amdgpu_gfx_queue_to_bit(adev,
 -                                                     iring->me,
 -                                                     iring->pipe,
 -                                                     0);
 +                      pipe = amdgpu_gfx_mec_queue_to_bit(adev,
 +                                                         iring->me,
 +                                                         iring->pipe,
 +                                                         0);
                        reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
                        gfx_v9_0_ring_set_pipe_percent(iring, reserve);
                }
  
                for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
                        iring = &adev->gfx.compute_ring[i];
 -                      pipe = amdgpu_gfx_queue_to_bit(adev,
 -                                                     iring->me,
 -                                                     iring->pipe,
 -                                                     0);
 +                      pipe = amdgpu_gfx_mec_queue_to_bit(adev,
 +                                                         iring->me,
 +                                                         iring->pipe,
 +                                                         0);
                        reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
                        gfx_v9_0_ring_set_pipe_percent(iring, reserve);
                }
@@@ -5025,7 -4989,7 +5029,7 @@@ static int gfx_v9_0_set_eop_interrupt_s
                                            enum amdgpu_interrupt_state state)
  {
        switch (type) {
 -      case AMDGPU_CP_IRQ_GFX_EOP:
 +      case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
                gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
index acdd186,0000000..cec7c1f
mode 100644,000000..100644
--- /dev/null
@@@ -1,916 -1,0 +1,917 @@@
 +/*
 + * Copyright 2019 Advanced Micro Devices, Inc.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 + * OTHER DEALINGS IN THE SOFTWARE.
 + *
 + */
 +#include <linux/firmware.h>
++#include <linux/pci.h>
 +#include "amdgpu.h"
 +#include "amdgpu_atomfirmware.h"
 +#include "gmc_v10_0.h"
 +
 +#include "hdp/hdp_5_0_0_offset.h"
 +#include "hdp/hdp_5_0_0_sh_mask.h"
 +#include "gc/gc_10_1_0_sh_mask.h"
 +#include "mmhub/mmhub_2_0_0_sh_mask.h"
 +#include "dcn/dcn_2_0_0_offset.h"
 +#include "dcn/dcn_2_0_0_sh_mask.h"
 +#include "oss/osssys_5_0_0_offset.h"
 +#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
 +#include "navi10_enum.h"
 +
 +#include "soc15.h"
 +#include "soc15_common.h"
 +
 +#include "nbio_v2_3.h"
 +
 +#include "gfxhub_v2_0.h"
 +#include "mmhub_v2_0.h"
 +#include "athub_v2_0.h"
 +/* XXX Move this macro to a navi10 header file, which is like vid.h for VI. */
 +#define AMDGPU_NUM_OF_VMIDS                   8
 +
 +#if 0
 +static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
 +{
 +      /* TODO add golden setting for hdp */
 +};
 +#endif
 +
 +static int
 +gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
 +                                 struct amdgpu_irq_src *src, unsigned type,
 +                                 enum amdgpu_interrupt_state state)
 +{
 +      struct amdgpu_vmhub *hub;
 +      u32 tmp, reg, bits[AMDGPU_MAX_VMHUBS], i;
 +
 +      bits[AMDGPU_GFXHUB] = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
 +
 +      bits[AMDGPU_MMHUB] = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 +              MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
 +
 +      switch (state) {
 +      case AMDGPU_IRQ_STATE_DISABLE:
 +              /* MM HUB */
 +              hub = &adev->vmhub[AMDGPU_MMHUB];
 +              for (i = 0; i < 16; i++) {
 +                      reg = hub->vm_context0_cntl + i;
 +                      tmp = RREG32(reg);
 +                      tmp &= ~bits[AMDGPU_MMHUB];
 +                      WREG32(reg, tmp);
 +              }
 +
 +              /* GFX HUB */
 +              hub = &adev->vmhub[AMDGPU_GFXHUB];
 +              for (i = 0; i < 16; i++) {
 +                      reg = hub->vm_context0_cntl + i;
 +                      tmp = RREG32(reg);
 +                      tmp &= ~bits[AMDGPU_GFXHUB];
 +                      WREG32(reg, tmp);
 +              }
 +              break;
 +      case AMDGPU_IRQ_STATE_ENABLE:
 +              /* MM HUB */
 +              hub = &adev->vmhub[AMDGPU_MMHUB];
 +              for (i = 0; i < 16; i++) {
 +                      reg = hub->vm_context0_cntl + i;
 +                      tmp = RREG32(reg);
 +                      tmp |= bits[AMDGPU_MMHUB];
 +                      WREG32(reg, tmp);
 +              }
 +
 +              /* GFX HUB */
 +              hub = &adev->vmhub[AMDGPU_GFXHUB];
 +              for (i = 0; i < 16; i++) {
 +                      reg = hub->vm_context0_cntl + i;
 +                      tmp = RREG32(reg);
 +                      tmp |= bits[AMDGPU_GFXHUB];
 +                      WREG32(reg, tmp);
 +              }
 +              break;
 +      default:
 +              break;
 +      }
 +
 +      return 0;
 +}
 +
 +static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
 +                                     struct amdgpu_irq_src *source,
 +                                     struct amdgpu_iv_entry *entry)
 +{
 +      struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
 +      uint32_t status = 0;
 +      u64 addr;
 +
 +      addr = (u64)entry->src_data[0] << 12;
 +      addr |= ((u64)entry->src_data[1] & 0xf) << 44;
 +
 +      if (!amdgpu_sriov_vf(adev)) {
 +              status = RREG32(hub->vm_l2_pro_fault_status);
 +              WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
 +      }
 +
 +      if (printk_ratelimit()) {
 +              dev_err(adev->dev,
 +                      "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
 +                      entry->vmid_src ? "mmhub" : "gfxhub",
 +                      entry->src_id, entry->ring_id, entry->vmid,
 +                      entry->pasid);
 +              dev_err(adev->dev, "  at page 0x%016llx from %d\n",
 +                      addr, entry->client_id);
 +              if (!amdgpu_sriov_vf(adev))
 +                      dev_err(adev->dev,
 +                              "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
 +                              status);
 +      }
 +
 +      return 0;
 +}
 +
 +static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
 +      .set = gmc_v10_0_vm_fault_interrupt_state,
 +      .process = gmc_v10_0_process_interrupt,
 +};
 +
 +static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
 +{
 +      adev->gmc.vm_fault.num_types = 1;
 +      adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;
 +}
 +
 +static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
 +                                           uint32_t flush_type)
 +{
 +      u32 req = 0;
 +
 +      /* invalidate using legacy mode on vmid */
 +      req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
 +                          PER_VMID_INVALIDATE_REQ, 1 << vmid);
 +      req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
 +      req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
 +      req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
 +      req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
 +      req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
 +      req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
 +      req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
 +                          CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
 +
 +      return req;
 +}
 +
 +/*
 + * GART
 + * VMID 0 is the physical GPU addresses as used by the kernel.
 + * VMIDs 1-15 are used for userspace clients and are handled
 + * by the amdgpu vm/hsa code.
 + */
 +
 +static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
 +                                 unsigned int vmhub, uint32_t flush_type)
 +{
 +      struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
 +      u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type);
 +      /* Use register 17 for GART */
 +      const unsigned eng = 17;
 +      unsigned int i;
 +
 +      WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
 +
 +      /* Wait for ACK with a delay. */
 +      for (i = 0; i < adev->usec_timeout; i++) {
 +              tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
 +              tmp &= 1 << vmid;
 +              if (tmp)
 +                      break;
 +
 +              udelay(1);
 +      }
 +
 +      if (i < adev->usec_timeout)
 +              return;
 +
 +      DRM_ERROR("Timeout waiting for VM flush ACK!\n");
 +}
 +
 +/**
 + * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
 + *
 + * @adev: amdgpu_device pointer
 + * @vmid: vm instance to flush
 + *
 + * Flush the TLB for the requested page table.
 + */
 +static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev,
 +                                  uint32_t vmid, uint32_t flush_type)
 +{
 +      struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 +      struct dma_fence *fence;
 +      struct amdgpu_job *job;
 +
 +      int r;
 +
 +      /* flush hdp cache */
 +      adev->nbio_funcs->hdp_flush(adev, NULL);
 +
 +      mutex_lock(&adev->mman.gtt_window_lock);
 +
 +      gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB, 0);
 +      if (!adev->mman.buffer_funcs_enabled || !adev->ib_pool_ready ||
 +          adev->asic_type != CHIP_NAVI10) {
 +              gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB, 0);
 +              mutex_unlock(&adev->mman.gtt_window_lock);
 +              return;
 +      }
 +
 +      /* The SDMA on Navi has a bug which can theoretically result in memory
 +       * corruption if an invalidation happens at the same time as a VA
 +       * translation. Avoid this by doing the invalidation from the SDMA
 +       * itself.
 +       */
 +      r = amdgpu_job_alloc_with_ib(adev, 16 * 4, &job);
 +      if (r)
 +              goto error_alloc;
 +
 +      job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
 +      job->vm_needs_flush = true;
 +      amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 +      r = amdgpu_job_submit(job, &adev->mman.entity,
 +                            AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
 +      if (r)
 +              goto error_submit;
 +
 +      mutex_unlock(&adev->mman.gtt_window_lock);
 +
 +      dma_fence_wait(fence, false);
 +      dma_fence_put(fence);
 +
 +      return;
 +
 +error_submit:
 +      amdgpu_job_free(job);
 +
 +error_alloc:
 +      mutex_unlock(&adev->mman.gtt_window_lock);
 +      DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
 +}
 +
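 +/*
 + * Ring-emitted variant of the TLB flush: the same per-hub register
 + * programming as gmc_v10_0_flush_vm_hub(), but packed into the command
 + * stream, followed by a wait on the per-VMID ack bit.
 + */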
 +static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 +                                           unsigned vmid, uint64_t pd_addr)
 +{
 +      struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
 +      uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
 +      unsigned eng = ring->vm_inv_eng;
 +
 +      amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
 +                            lower_32_bits(pd_addr));
 +
 +      amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
 +                            upper_32_bits(pd_addr));
 +
 +      amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);
 +
 +      /* wait for the invalidate to complete */
 +      amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
 +                                1 << vmid, 1 << vmid);
 +
 +      return pd_addr;
 +}
 +
 +static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
 +                                       unsigned pasid)
 +{
 +      struct amdgpu_device *adev = ring->adev;
 +      uint32_t reg;
 +
 +      if (ring->funcs->vmhub == AMDGPU_GFXHUB)
 +              reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
 +      else
 +              reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
 +
 +      amdgpu_ring_emit_wreg(ring, reg, pasid);
 +}
 +
 +/*
 + * PTE format on NAVI 10:
 + * 63:59 reserved
 + * 58:57 reserved
 + * 56 F
 + * 55 L
 + * 54 reserved
 + * 53:52 SW
 + * 51 T
 + * 50:48 mtype
 + * 47:12 4k physical page base address
 + * 11:7 fragment
 + * 6 write
 + * 5 read
 + * 4 exe
 + * 3 Z
 + * 2 snooped
 + * 1 system
 + * 0 valid
 + *
 + * PDE format on NAVI 10:
 + * 63:59 block fragment size
 + * 58:55 reserved
 + * 54 P
 + * 53:48 reserved
 + * 47:6 physical base address of PD or PTE
 + * 5:3 reserved
 + * 2 C
 + * 1 system
 + * 0 valid
 + */
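 +/*
 + * Illustrative example (not from the register spec): a valid, snooped
 + * system page with MTYPE_NC would set bits 0 (valid), 1 (system) and
 + * 2 (snooped), plus AMDGPU_PTE_MTYPE_NV10(MTYPE_NC) in bits 50:48.
 + */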
 +static uint64_t gmc_v10_0_get_vm_pte_flags(struct amdgpu_device *adev,
 +                                         uint32_t flags)
 +{
 +      uint64_t pte_flag = 0;
 +
 +      if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
 +              pte_flag |= AMDGPU_PTE_EXECUTABLE;
 +      if (flags & AMDGPU_VM_PAGE_READABLE)
 +              pte_flag |= AMDGPU_PTE_READABLE;
 +      if (flags & AMDGPU_VM_PAGE_WRITEABLE)
 +              pte_flag |= AMDGPU_PTE_WRITEABLE;
 +
 +      switch (flags & AMDGPU_VM_MTYPE_MASK) {
 +      case AMDGPU_VM_MTYPE_DEFAULT:
 +              pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
 +              break;
 +      case AMDGPU_VM_MTYPE_NC:
 +              pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
 +              break;
 +      case AMDGPU_VM_MTYPE_WC:
 +              pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
 +              break;
 +      case AMDGPU_VM_MTYPE_CC:
 +              pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
 +              break;
 +      case AMDGPU_VM_MTYPE_UC:
 +              pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
 +              break;
 +      default:
 +              pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
 +              break;
 +      }
 +
 +      if (flags & AMDGPU_VM_PAGE_PRT)
 +              pte_flag |= AMDGPU_PTE_PRT;
 +
 +      return pte_flag;
 +}
 +
 +static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
 +                               uint64_t *addr, uint64_t *flags)
 +{
 +      if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
 +              *addr = adev->vm_manager.vram_base_offset + *addr -
 +                      adev->gmc.vram_start;
 +      BUG_ON(*addr & 0xFFFF00000000003FULL);
 +
 +      if (!adev->gmc.translate_further)
 +              return;
 +
 +      if (level == AMDGPU_VM_PDB1) {
 +              /* Set the block fragment size */
 +              if (!(*flags & AMDGPU_PDE_PTE))
 +                      *flags |= AMDGPU_PDE_BFS(0x9);
 +
 +      } else if (level == AMDGPU_VM_PDB0) {
 +              if (*flags & AMDGPU_PDE_PTE)
 +                      *flags &= ~AMDGPU_PDE_PTE;
 +              else
 +                      *flags |= AMDGPU_PTE_TF;
 +      }
 +}
 +
 +static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
 +      .flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
 +      .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
 +      .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
 +      .get_vm_pte_flags = gmc_v10_0_get_vm_pte_flags,
 +      .get_vm_pde = gmc_v10_0_get_vm_pde
 +};
 +
 +static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
 +{
 +      if (adev->gmc.gmc_funcs == NULL)
 +              adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
 +}
 +
 +static int gmc_v10_0_early_init(void *handle)
 +{
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      gmc_v10_0_set_gmc_funcs(adev);
 +      gmc_v10_0_set_irq_funcs(adev);
 +
 +      adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
 +      adev->gmc.shared_aperture_end =
 +              adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
 +      adev->gmc.private_aperture_start = 0x1000000000000000ULL;
 +      adev->gmc.private_aperture_end =
 +              adev->gmc.private_aperture_start + (4ULL << 30) - 1;
 +
 +      return 0;
 +}
 +
 +static int gmc_v10_0_late_init(void *handle)
 +{
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +      unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
 +      unsigned i;
 +
 +      for (i = 0; i < adev->num_rings; ++i) {
 +              struct amdgpu_ring *ring = adev->rings[i];
 +              unsigned vmhub = ring->funcs->vmhub;
 +
 +              ring->vm_inv_eng = vm_inv_eng[vmhub]++;
 +              dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
 +                       ring->idx, ring->name, ring->vm_inv_eng,
 +                       ring->funcs->vmhub);
 +      }
 +
 +      /* Engine 17 is used for GART flushes */
 +      for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
 +              BUG_ON(vm_inv_eng[i] > 17);
 +
 +      return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 +}
 +
 +static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
 +                                      struct amdgpu_gmc *mc)
 +{
 +      u64 base = 0;
 +
 +      if (!amdgpu_sriov_vf(adev))
 +              base = gfxhub_v2_0_get_fb_location(adev);
 +
 +      amdgpu_gmc_vram_location(adev, &adev->gmc, base);
 +      amdgpu_gmc_gart_location(adev, mc);
 +
 +      /* base offset of vram pages */
 +      adev->vm_manager.vram_base_offset = gfxhub_v2_0_get_mc_fb_offset(adev);
 +}
 +
 +/**
 + * gmc_v10_0_mc_init - initialize the memory controller driver params
 + *
 + * @adev: amdgpu_device pointer
 + *
 + * Look up the amount of vram, vram width, and decide how to place
 + * vram and gart within the GPU's physical address space.
 + * Returns 0 for success.
 + */
 +static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
 +{
 +      int chansize, numchan;
 +
 +      if (!amdgpu_emu_mode)
 +              adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
 +      else {
 +              /* hard code vram_width for emulation */
 +              chansize = 128;
 +              numchan = 1;
 +              adev->gmc.vram_width = numchan * chansize;
 +      }
 +
 +      /* Could the aperture size report 0? */
 +      adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
 +      adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 +
 +      /* get_memsize() reports the size in MB; convert it to bytes */
 +      adev->gmc.mc_vram_size =
 +              adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
 +      adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
 +      adev->gmc.visible_vram_size = adev->gmc.aper_size;
 +
 +      /* In case the PCI BAR is larger than the actual amount of vram */
 +      if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
 +              adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
 +
 +      /* set the gart size */
 +      if (amdgpu_gart_size == -1) {
 +              switch (adev->asic_type) {
 +              case CHIP_NAVI10:
 +              default:
 +                      adev->gmc.gart_size = 512ULL << 20;
 +                      break;
 +              }
 +      } else
 +              adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
 +
 +      gmc_v10_0_vram_gtt_location(adev, &adev->gmc);
 +
 +      return 0;
 +}
 +
 +static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
 +{
 +      int r;
 +
 +      if (adev->gart.bo) {
 +              WARN(1, "NAVI10 PCIE GART already initialized\n");
 +              return 0;
 +      }
 +
 +      /* Initialize common gart structure */
 +      r = amdgpu_gart_init(adev);
 +      if (r)
 +              return r;
 +
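 +      /* Each GART entry is a 64-bit PTE, hence 8 bytes per GPU page. */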
 +      adev->gart.table_size = adev->gart.num_gpu_pages * 8;
 +      adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
 +                               AMDGPU_PTE_EXECUTABLE;
 +
 +      return amdgpu_gart_table_vram_alloc(adev);
 +}
 +
 +static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
 +{
 +      u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
 +      unsigned size;
 +
 +      if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
 +              size = 9 * 1024 * 1024; /* reserve 8 MB for the VGA emulator and 1 MB for the FB */
 +      } else {
 +              u32 viewport;
 +              u32 pitch;
 +
 +              viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
 +              pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
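 +              /* Scan-out footprint: viewport height x pitch x 4 bytes,
 +               * assuming a 32bpp pre-OS frame buffer. */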
 +              size = (REG_GET_FIELD(viewport,
 +                                      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
 +                              REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
 +                              4);
 +      }
 +      /* return 0 if the pre-OS buffer uses up most of vram */
 +      if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) {
 +              DRM_ERROR("Warning: pre-OS buffer uses most of vram, \
 +                              be aware of gart table overwrite\n");
 +              return 0;
 +      }
 +
 +      return size;
 +}
 +
 +
 +
 +static int gmc_v10_0_sw_init(void *handle)
 +{
 +      int r;
 +      int dma_bits;
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      gfxhub_v2_0_init(adev);
 +      mmhub_v2_0_init(adev);
 +
 +      spin_lock_init(&adev->gmc.invalidate_lock);
 +
 +      adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
 +      switch (adev->asic_type) {
 +      case CHIP_NAVI10:
 +              /*
 +               * To support 4-level page tables, use the maximum
 +               * VM size of Navi10: 256TB (48 bits) with a block
 +               * size of 512 (9 bits).
 +               */
 +              amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
 +              break;
 +      default:
 +              break;
 +      }
 +
 +      /* These interrupts are the VMC and UTCL2 page faults. */
 +      r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
 +                            VMC_1_0__SRCID__VM_FAULT,
 +                            &adev->gmc.vm_fault);
 +      if (r)
 +              return r;
 +
 +      r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
 +                            UTCL2_1_0__SRCID__FAULT,
 +                            &adev->gmc.vm_fault);
 +      if (r)
 +              return r;
 +
 +      /*
 +       * Set the internal MC address mask. This is the max address of the GPU's
 +       * internal address space.
 +       */
 +      adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
 +
 +      /*
 +       * Reserve 9M of stolen memory for navi10, as on vega10.
 +       * TODO: check whether this is really needed on this ASIC.
 +       */
 +      if (amdgpu_emu_mode == 1)
 +              adev->gmc.stolen_size = 0;
 +      else
 +              adev->gmc.stolen_size = 9 * 1024 * 1024;
 +
 +      /*
 +       * Set DMA mask + need_dma32 flags.
 +       * PCIE - can handle 44-bits.
 +       * IGP - can handle 44-bits
 +       * PCI - dma32 for legacy pci gart, 44 bits on navi10
 +       */
 +      adev->need_dma32 = false;
 +      dma_bits = adev->need_dma32 ? 32 : 44;
 +
 +      r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
 +      if (r) {
 +              adev->need_dma32 = true;
 +              dma_bits = 32;
 +              printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
 +      }
 +
 +      r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
 +      if (r) {
 +              pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
 +              printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
 +      }
 +
 +      r = gmc_v10_0_mc_init(adev);
 +      if (r)
 +              return r;
 +
 +      adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev);
 +
 +      /* Memory manager */
 +      r = amdgpu_bo_init(adev);
 +      if (r)
 +              return r;
 +
 +      r = gmc_v10_0_gart_init(adev);
 +      if (r)
 +              return r;
 +
 +      /*
 +       * number of VMs
 +       * VMID 0 is reserved for System
 +       * amdgpu graphics/compute will use VMIDs 1-7
 +       * amdkfd will use VMIDs 8-15
 +       */
 +      adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
 +      adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
 +
 +      amdgpu_vm_manager_init(adev);
 +
 +      return 0;
 +}
 +
 +/**
 + * gmc_v10_0_gart_fini - vm fini callback
 + *
 + * @adev: amdgpu_device pointer
 + *
 + * Tears down the driver GART/VM setup (navi10).
 + */
 +static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
 +{
 +      amdgpu_gart_table_vram_free(adev);
 +      amdgpu_gart_fini(adev);
 +}
 +
 +static int gmc_v10_0_sw_fini(void *handle)
 +{
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      amdgpu_vm_manager_fini(adev);
 +      gmc_v10_0_gart_fini(adev);
 +      amdgpu_gem_force_release(adev);
 +      amdgpu_bo_fini(adev);
 +
 +      return 0;
 +}
 +
 +static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
 +{
 +      switch (adev->asic_type) {
 +      case CHIP_NAVI10:
 +              break;
 +      default:
 +              break;
 +      }
 +}
 +
 +/**
 + * gmc_v10_0_gart_enable - gart enable
 + *
 + * @adev: amdgpu_device pointer
 + */
 +static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
 +{
 +      int r;
 +      bool value;
 +      u32 tmp;
 +
 +      if (adev->gart.bo == NULL) {
 +              dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 +              return -EINVAL;
 +      }
 +
 +      r = amdgpu_gart_table_vram_pin(adev);
 +      if (r)
 +              return r;
 +
 +      r = gfxhub_v2_0_gart_enable(adev);
 +      if (r)
 +              return r;
 +
 +      r = mmhub_v2_0_gart_enable(adev);
 +      if (r)
 +              return r;
 +
 +      tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
 +      tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
 +      WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
 +
 +      tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
 +      WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
 +
 +      /* Flush HDP after it is initialized */
 +      adev->nbio_funcs->hdp_flush(adev, NULL);
 +
 +      value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
 +
 +      gfxhub_v2_0_set_fault_enable_default(adev, value);
 +      mmhub_v2_0_set_fault_enable_default(adev, value);
 +      gmc_v10_0_flush_gpu_tlb(adev, 0, 0);
 +
 +      DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
 +               (unsigned)(adev->gmc.gart_size >> 20),
 +               (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
 +
 +      adev->gart.ready = true;
 +
 +      return 0;
 +}
 +
 +static int gmc_v10_0_hw_init(void *handle)
 +{
 +      int r;
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      /* The sequence of these two function calls matters. */
 +      gmc_v10_0_init_golden_registers(adev);
 +
 +      r = gmc_v10_0_gart_enable(adev);
 +      if (r)
 +              return r;
 +
 +      return 0;
 +}
 +
 +/**
 + * gmc_v10_0_gart_disable - gart disable
 + *
 + * @adev: amdgpu_device pointer
 + *
 + * This disables all VM page tables.
 + */
 +static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
 +{
 +      gfxhub_v2_0_gart_disable(adev);
 +      mmhub_v2_0_gart_disable(adev);
 +      amdgpu_gart_table_vram_unpin(adev);
 +}
 +
 +static int gmc_v10_0_hw_fini(void *handle)
 +{
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      if (amdgpu_sriov_vf(adev)) {
 +              /* full access mode, so don't touch any GMC register */
 +              DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
 +              return 0;
 +      }
 +
 +      amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 +      gmc_v10_0_gart_disable(adev);
 +
 +      return 0;
 +}
 +
 +static int gmc_v10_0_suspend(void *handle)
 +{
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      gmc_v10_0_hw_fini(adev);
 +
 +      return 0;
 +}
 +
 +static int gmc_v10_0_resume(void *handle)
 +{
 +      int r;
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      r = gmc_v10_0_hw_init(adev);
 +      if (r)
 +              return r;
 +
 +      amdgpu_vmid_reset_all(adev);
 +
 +      return 0;
 +}
 +
 +static bool gmc_v10_0_is_idle(void *handle)
 +{
 +      /* MC is always ready in GMC v10. */
 +      return true;
 +}
 +
 +static int gmc_v10_0_wait_for_idle(void *handle)
 +{
 +      /* There is no need to wait for MC idle in GMC v10. */
 +      return 0;
 +}
 +
 +static int gmc_v10_0_soft_reset(void *handle)
 +{
 +      return 0;
 +}
 +
 +static int gmc_v10_0_set_clockgating_state(void *handle,
 +                                         enum amd_clockgating_state state)
 +{
 +      int r;
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      r = mmhub_v2_0_set_clockgating(adev, state);
 +      if (r)
 +              return r;
 +
 +      return athub_v2_0_set_clockgating(adev, state);
 +}
 +
 +static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
 +{
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      mmhub_v2_0_get_clockgating(adev, flags);
 +
 +      athub_v2_0_get_clockgating(adev, flags);
 +}
 +
 +static int gmc_v10_0_set_powergating_state(void *handle,
 +                                         enum amd_powergating_state state)
 +{
 +      return 0;
 +}
 +
 +const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
 +      .name = "gmc_v10_0",
 +      .early_init = gmc_v10_0_early_init,
 +      .late_init = gmc_v10_0_late_init,
 +      .sw_init = gmc_v10_0_sw_init,
 +      .sw_fini = gmc_v10_0_sw_fini,
 +      .hw_init = gmc_v10_0_hw_init,
 +      .hw_fini = gmc_v10_0_hw_fini,
 +      .suspend = gmc_v10_0_suspend,
 +      .resume = gmc_v10_0_resume,
 +      .is_idle = gmc_v10_0_is_idle,
 +      .wait_for_idle = gmc_v10_0_wait_for_idle,
 +      .soft_reset = gmc_v10_0_soft_reset,
 +      .set_clockgating_state = gmc_v10_0_set_clockgating_state,
 +      .set_powergating_state = gmc_v10_0_set_powergating_state,
 +      .get_clockgating_state = gmc_v10_0_get_clockgating_state,
 +};
 +
 +const struct amdgpu_ip_block_version gmc_v10_0_ip_block =
 +{
 +      .type = AMD_IP_BLOCK_TYPE_GMC,
 +      .major = 10,
 +      .minor = 0,
 +      .rev = 0,
 +      .funcs = &gmc_v10_0_ip_funcs,
 +};
   * OTHER DEALINGS IN THE SOFTWARE.
   *
   */
  #include <linux/firmware.h>
+ #include <linux/pci.h>
  #include <drm/drm_cache.h>
  #include "amdgpu.h"
  #include "gmc_v9_0.h"
  #include "amdgpu_atomfirmware.h"
@@@ -531,22 -535,22 +535,22 @@@ static uint64_t gmc_v9_0_get_vm_pte_fla
  
        switch (flags & AMDGPU_VM_MTYPE_MASK) {
        case AMDGPU_VM_MTYPE_DEFAULT:
 -              pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
 +              pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
                break;
        case AMDGPU_VM_MTYPE_NC:
 -              pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
 +              pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
                break;
        case AMDGPU_VM_MTYPE_WC:
 -              pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
 +              pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
                break;
        case AMDGPU_VM_MTYPE_CC:
 -              pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
 +              pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
                break;
        case AMDGPU_VM_MTYPE_UC:
 -              pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
 +              pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
                break;
        default:
 -              pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
 +              pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
                break;
        }
  
@@@ -624,9 -628,8 +628,8 @@@ static bool gmc_v9_0_keep_stolen_memory
         */
        switch (adev->asic_type) {
        case CHIP_VEGA10:
-               return true;
        case CHIP_RAVEN:
-               return (adev->pdev->device == 0x15d8);
+               return true;
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        default:
@@@ -730,7 -733,9 +733,7 @@@ static int gmc_v9_0_ecc_late_init(void 
        if (r)
                goto interrupt;
  
 -      r = amdgpu_ras_debugfs_create(adev, &fs_info);
 -      if (r)
 -              goto debugfs;
 +      amdgpu_ras_debugfs_create(adev, &fs_info);
  
        r = amdgpu_ras_sysfs_create(adev, &fs_info);
        if (r)
@@@ -745,6 -750,7 +748,6 @@@ irq
        amdgpu_ras_sysfs_remove(adev, *ras_if);
  sysfs:
        amdgpu_ras_debugfs_remove(adev, *ras_if);
 -debugfs:
        amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
  interrupt:
        amdgpu_ras_feature_enable(adev, *ras_if, 0);
@@@ -913,7 -919,7 +916,7 @@@ static int gmc_v9_0_gart_init(struct am
        if (r)
                return r;
        adev->gart.table_size = adev->gart.num_gpu_pages * 8;
 -      adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
 +      adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
                                 AMDGPU_PTE_EXECUTABLE;
        return amdgpu_gart_table_vram_alloc(adev);
  }
index 0d92b88,0000000..29fab79
mode 100644,000000..100644
--- /dev/null
@@@ -1,365 -1,0 +1,366 @@@
 +/*
 + * Copyright 2019 Advanced Micro Devices, Inc.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 + * OTHER DEALINGS IN THE SOFTWARE.
 + *
 + */
 +
 +#include <linux/firmware.h>
++#include <linux/module.h>
 +#include "amdgpu.h"
 +#include "soc15_common.h"
 +#include "nv.h"
 +#include "gc/gc_10_1_0_offset.h"
 +#include "gc/gc_10_1_0_sh_mask.h"
 +
 +MODULE_FIRMWARE("amdgpu/navi10_mes.bin");
 +
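 +/*
 + * The queue and gang management callbacks below are stubs for now: they
 + * accept the request and report success without programming the hardware.
 + */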
 +static int mes_v10_1_add_hw_queue(struct amdgpu_mes *mes,
 +                                struct mes_add_queue_input *input)
 +{
 +      return 0;
 +}
 +
 +static int mes_v10_1_remove_hw_queue(struct amdgpu_mes *mes,
 +                                   struct mes_remove_queue_input *input)
 +{
 +      return 0;
 +}
 +
 +static int mes_v10_1_suspend_gang(struct amdgpu_mes *mes,
 +                                struct mes_suspend_gang_input *input)
 +{
 +      return 0;
 +}
 +
 +static int mes_v10_1_resume_gang(struct amdgpu_mes *mes,
 +                               struct mes_resume_gang_input *input)
 +{
 +      return 0;
 +}
 +
 +static const struct amdgpu_mes_funcs mes_v10_1_funcs = {
 +      .add_hw_queue = mes_v10_1_add_hw_queue,
 +      .remove_hw_queue = mes_v10_1_remove_hw_queue,
 +      .suspend_gang = mes_v10_1_suspend_gang,
 +      .resume_gang = mes_v10_1_resume_gang,
 +};
 +
 +static int mes_v10_1_init_microcode(struct amdgpu_device *adev)
 +{
 +      const char *chip_name;
 +      char fw_name[30];
 +      int err;
 +      const struct mes_firmware_header_v1_0 *mes_hdr;
 +
 +      switch (adev->asic_type) {
 +      case CHIP_NAVI10:
 +              chip_name = "navi10";
 +              break;
 +      default:
 +              BUG();
 +      }
 +
 +      snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin", chip_name);
 +      err = request_firmware(&adev->mes.fw, fw_name, adev->dev);
 +      if (err)
 +              return err;
 +
 +      err = amdgpu_ucode_validate(adev->mes.fw);
 +      if (err) {
 +              release_firmware(adev->mes.fw);
 +              adev->mes.fw = NULL;
 +              return err;
 +      }
 +
 +      mes_hdr = (const struct mes_firmware_header_v1_0 *)adev->mes.fw->data;
 +      adev->mes.ucode_fw_version = le32_to_cpu(mes_hdr->mes_ucode_version);
 +      /* keep the data ucode version in its own field; reusing
 +       * ucode_fw_version here would silently overwrite the value read
 +       * above (assumes struct amdgpu_mes carries a data_fw_version field)
 +       */
 +      adev->mes.data_fw_version =
 +              le32_to_cpu(mes_hdr->mes_ucode_data_version);
 +      adev->mes.uc_start_addr =
 +              le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
 +              ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
 +      adev->mes.data_start_addr =
 +              le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
 +              ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
 +
 +      return 0;
 +}
 +
 +static void mes_v10_1_free_microcode(struct amdgpu_device *adev)
 +{
 +      release_firmware(adev->mes.fw);
 +      adev->mes.fw = NULL;
 +}
 +
 +static int mes_v10_1_allocate_ucode_buffer(struct amdgpu_device *adev)
 +{
 +      int r;
 +      const struct mes_firmware_header_v1_0 *mes_hdr;
 +      const __le32 *fw_data;
 +      unsigned fw_size;
 +
 +      mes_hdr = (const struct mes_firmware_header_v1_0 *)
 +              adev->mes.fw->data;
 +
 +      fw_data = (const __le32 *)(adev->mes.fw->data +
 +                 le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
 +      fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
 +
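 +      /* create a pinned, CPU-mapped GTT buffer and copy the MES ucode in */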
 +      r = amdgpu_bo_create_reserved(adev, fw_size,
 +                                    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
 +                                    &adev->mes.ucode_fw_obj,
 +                                    &adev->mes.ucode_fw_gpu_addr,
 +                                    (void **)&adev->mes.ucode_fw_ptr);
 +      if (r) {
 +              dev_err(adev->dev, "(%d) failed to create mes fw bo\n", r);
 +              return r;
 +      }
 +
 +      memcpy(adev->mes.ucode_fw_ptr, fw_data, fw_size);
 +
 +      amdgpu_bo_kunmap(adev->mes.ucode_fw_obj);
 +      amdgpu_bo_unreserve(adev->mes.ucode_fw_obj);
 +
 +      return 0;
 +}
 +
 +static int mes_v10_1_allocate_ucode_data_buffer(struct amdgpu_device *adev)
 +{
 +      int r;
 +      const struct mes_firmware_header_v1_0 *mes_hdr;
 +      const __le32 *fw_data;
 +      unsigned fw_size;
 +
 +      mes_hdr = (const struct mes_firmware_header_v1_0 *)
 +              adev->mes.fw->data;
 +
 +      fw_data = (const __le32 *)(adev->mes.fw->data +
 +                 le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
 +      fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
 +
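 +      /* the MES data ucode gets its own 64KB-aligned GTT buffer */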
 +      r = amdgpu_bo_create_reserved(adev, fw_size,
 +                                    64 * 1024, AMDGPU_GEM_DOMAIN_GTT,
 +                                    &adev->mes.data_fw_obj,
 +                                    &adev->mes.data_fw_gpu_addr,
 +                                    (void **)&adev->mes.data_fw_ptr);
 +      if (r) {
 +              dev_err(adev->dev, "(%d) failed to create mes data fw bo\n", r);
 +              return r;
 +      }
 +
 +      memcpy(adev->mes.data_fw_ptr, fw_data, fw_size);
 +
 +      amdgpu_bo_kunmap(adev->mes.data_fw_obj);
 +      amdgpu_bo_unreserve(adev->mes.data_fw_obj);
 +
 +      return 0;
 +}
 +
 +static void mes_v10_1_free_ucode_buffers(struct amdgpu_device *adev)
 +{
 +      amdgpu_bo_free_kernel(&adev->mes.data_fw_obj,
 +                            &adev->mes.data_fw_gpu_addr,
 +                            (void **)&adev->mes.data_fw_ptr);
 +
 +      amdgpu_bo_free_kernel(&adev->mes.ucode_fw_obj,
 +                            &adev->mes.ucode_fw_gpu_addr,
 +                            (void **)&adev->mes.ucode_fw_ptr);
 +}
 +
 +static void mes_v10_1_enable(struct amdgpu_device *adev, bool enable)
 +{
 +      uint32_t data = 0;
 +
 +      if (enable) {
 +              data = RREG32_SOC15(GC, 0, mmCP_MES_CNTL);
 +              data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
 +              WREG32_SOC15(GC, 0, mmCP_MES_CNTL, data);
 +
 +              /* set ucode start address */
 +              WREG32_SOC15(GC, 0, mmCP_MES_PRGRM_CNTR_START,
 +                           (uint32_t)(adev->mes.uc_start_addr) >> 2);
 +
 +              /* clear BYPASS_UNCACHED to avoid hangs after interrupt. */
 +              data = RREG32_SOC15(GC, 0, mmCP_MES_DC_OP_CNTL);
 +              data = REG_SET_FIELD(data, CP_MES_DC_OP_CNTL,
 +                                   BYPASS_UNCACHED, 0);
 +              WREG32_SOC15(GC, 0, mmCP_MES_DC_OP_CNTL, data);
 +
 +              /* unhalt MES and activate pipe0 */
 +              data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
 +              WREG32_SOC15(GC, 0, mmCP_MES_CNTL, data);
 +      } else {
 +              data = RREG32_SOC15(GC, 0, mmCP_MES_CNTL);
 +              data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_ACTIVE, 0);
 +              data = REG_SET_FIELD(data, CP_MES_CNTL,
 +                                   MES_INVALIDATE_ICACHE, 1);
 +              data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
 +              data = REG_SET_FIELD(data, CP_MES_CNTL, MES_HALT, 1);
 +              WREG32_SOC15(GC, 0, mmCP_MES_CNTL, data);
 +      }
 +}
 +
 +/* This function handles backdoor (direct register write) MES firmware loading */
 +static int mes_v10_1_load_microcode(struct amdgpu_device *adev)
 +{
 +      int r;
 +      uint32_t data;
 +
 +      if (!adev->mes.fw)
 +              return -EINVAL;
 +
 +      r = mes_v10_1_allocate_ucode_buffer(adev);
 +      if (r)
 +              return r;
 +
 +      r = mes_v10_1_allocate_ucode_data_buffer(adev);
 +      if (r) {
 +              mes_v10_1_free_ucode_buffers(adev);
 +              return r;
 +      }
 +
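 +      /* halt MES and reset pipe 0 before programming its caches */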
 +      mes_v10_1_enable(adev, false);
 +
 +      WREG32_SOC15(GC, 0, mmCP_MES_IC_BASE_CNTL, 0);
 +
 +      mutex_lock(&adev->srbm_mutex);
 +      /* me=3, pipe=0, queue=0 */
 +      nv_grbm_select(adev, 3, 0, 0, 0);
 +
 +      /* set ucode start address */
 +      WREG32_SOC15(GC, 0, mmCP_MES_PRGRM_CNTR_START,
 +                   (uint32_t)(adev->mes.uc_start_addr) >> 2);
 +
 +      /* set ucode firmware address */
 +      WREG32_SOC15(GC, 0, mmCP_MES_IC_BASE_LO,
 +                   lower_32_bits(adev->mes.ucode_fw_gpu_addr));
 +      WREG32_SOC15(GC, 0, mmCP_MES_IC_BASE_HI,
 +                   upper_32_bits(adev->mes.ucode_fw_gpu_addr));
 +
 +      /* set ucode instruction cache boundary to 2M-1 */
 +      WREG32_SOC15(GC, 0, mmCP_MES_MIBOUND_LO, 0x1FFFFF);
 +
 +      /* set ucode data firmware address */
 +      WREG32_SOC15(GC, 0, mmCP_MES_MDBASE_LO,
 +                   lower_32_bits(adev->mes.data_fw_gpu_addr));
 +      WREG32_SOC15(GC, 0, mmCP_MES_MDBASE_HI,
 +                   upper_32_bits(adev->mes.data_fw_gpu_addr));
 +
 +      /* set ucode data boundary to 256K-1 (0x3FFFF) */
 +      WREG32_SOC15(GC, 0, mmCP_MES_MDBOUND_LO, 0x3FFFF);
 +
 +      /* invalidate ICACHE */
 +      data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL);
 +      data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
 +      data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
 +      WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL, data);
 +
 +      /* prime the ICACHE. */
 +      data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL);
 +      data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
 +      WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL, data);
 +
 +      nv_grbm_select(adev, 0, 0, 0, 0);
 +      mutex_unlock(&adev->srbm_mutex);
 +
 +      return 0;
 +}
 +
 +static int mes_v10_1_sw_init(void *handle)
 +{
 +      int r;
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      r = mes_v10_1_init_microcode(adev);
 +      if (r)
 +              return r;
 +
 +      return 0;
 +}
 +
 +static int mes_v10_1_sw_fini(void *handle)
 +{
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      mes_v10_1_free_microcode(adev);
 +
 +      return 0;
 +}
 +
 +static int mes_v10_1_hw_init(void *handle)
 +{
 +      int r;
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
 +              r = mes_v10_1_load_microcode(adev);
 +              if (r) {
 +                      DRM_ERROR("failed to load MES fw, r=%d\n", r);
 +                      return r;
 +              }
 +      } else {
 +              DRM_ERROR("only direct fw loading is supported on MES\n");
 +              return -EINVAL;
 +      }
 +
 +      mes_v10_1_enable(adev, true);
 +
 +      return 0;
 +}
 +
 +static int mes_v10_1_hw_fini(void *handle)
 +{
 +      struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 +
 +      mes_v10_1_enable(adev, false);
 +
 +      if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)
 +              mes_v10_1_free_ucode_buffers(adev);
 +
 +      return 0;
 +}
 +
 +static int mes_v10_1_suspend(void *handle)
 +{
 +      return 0;
 +}
 +
 +static int mes_v10_1_resume(void *handle)
 +{
 +      return 0;
 +}
 +
 +static const struct amd_ip_funcs mes_v10_1_ip_funcs = {
 +      .name = "mes_v10_1",
 +      .sw_init = mes_v10_1_sw_init,
 +      .sw_fini = mes_v10_1_sw_fini,
 +      .hw_init = mes_v10_1_hw_init,
 +      .hw_fini = mes_v10_1_hw_fini,
 +      .suspend = mes_v10_1_suspend,
 +      .resume = mes_v10_1_resume,
 +};
 +
 +const struct amdgpu_ip_block_version mes_v10_1_ip_block = {
 +      .type = AMD_IP_BLOCK_TYPE_MES,
 +      .major = 10,
 +      .minor = 1,
 +      .rev = 0,
 +      .funcs = &mes_v10_1_ip_funcs,
 +};
@@@ -21,6 -21,8 +21,8 @@@
   */
  
  #include <linux/firmware.h>
+ #include <linux/module.h>
  #include "amdgpu.h"
  #include "amdgpu_psp.h"
  #include "amdgpu_ucode.h"
  MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
  MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
  MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
 +MODULE_FIRMWARE("amdgpu/navi10_sos.bin");
 +MODULE_FIRMWARE("amdgpu/navi10_asd.bin");
  
  /* address block */
  #define smnMP1_FIRMWARE_FLAGS         0x3010024
 +/* navi10 reg offset defines; added to the GC aperture base in psp_v11_0_sram_map() */
 +#define mmRLC_GPM_UCODE_ADDR_NV10     0x5b61
 +#define mmRLC_GPM_UCODE_DATA_NV10     0x5b62
 +#define mmSDMA0_UCODE_ADDR_NV10               0x5880
 +#define mmSDMA0_UCODE_DATA_NV10               0x5881
  
  static int psp_v11_0_init_microcode(struct psp_context *psp)
  {
@@@ -57,7 -52,6 +59,7 @@@
        char fw_name[30];
        int err = 0;
        const struct psp_firmware_header_v1_0 *sos_hdr;
 +      const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
        const struct psp_firmware_header_v1_0 *asd_hdr;
        const struct ta_firmware_header_v1_0 *ta_hdr;
  
@@@ -67,9 -61,6 +69,9 @@@
        case CHIP_VEGA20:
                chip_name = "vega20";
                break;
 +      case CHIP_NAVI10:
 +              chip_name = "navi10";
 +              break;
        default:
                BUG();
        }
                goto out;
  
        sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
 -      adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
 -      adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
 -      adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
 -      adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->header.ucode_size_bytes) -
 -                                      le32_to_cpu(sos_hdr->sos_size_bytes);
 -      adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
 +      amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
 +
 +      switch (sos_hdr->header.header_version_major) {
 +      case 1:
 +              adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
 +              adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
 +              adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
 +              adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
 +              adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
                                le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
 -      adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
 +              adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
                                le32_to_cpu(sos_hdr->sos_offset_bytes);
 +              if (sos_hdr->header.header_version_minor == 1) {
 +                      sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
 +                      adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
 +                      adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
 +                                      le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
 +              }
 +              break;
 +      default:
 +              dev_err(adev->dev,
 +                      "Unsupported psp sos firmware\n");
 +              err = -EINVAL;
 +              goto out;
 +      }
  
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
        err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
        adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
                                le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
  
 -      snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
 -      err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
 -      if (err) {
 -              release_firmware(adev->psp.ta_fw);
 -              adev->psp.ta_fw = NULL;
 -              dev_info(adev->dev,
 -                       "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
 -      } else {
 -              err = amdgpu_ucode_validate(adev->psp.ta_fw);
 -              if (err)
 -                      goto out2;
 -
 -              ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
 -              adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
 -              adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
 -              adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
 -                      le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
 -
 -              adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
 -
 -              adev->psp.ta_ras_ucode_version = le32_to_cpu(ta_hdr->ta_ras_ucode_version);
 -              adev->psp.ta_ras_ucode_size = le32_to_cpu(ta_hdr->ta_ras_size_bytes);
 -              adev->psp.ta_ras_start_addr = (uint8_t *)adev->psp.ta_xgmi_start_addr +
 -                      le32_to_cpu(ta_hdr->ta_ras_offset_bytes);
 +      switch (adev->asic_type) {
 +      case CHIP_VEGA20:
 +              snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
 +              err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
 +              if (err) {
 +                      release_firmware(adev->psp.ta_fw);
 +                      adev->psp.ta_fw = NULL;
 +                      dev_info(adev->dev,
 +                               "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
 +              } else {
 +                      err = amdgpu_ucode_validate(adev->psp.ta_fw);
 +                      if (err)
 +                              goto out2;
 +
 +                      ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
 +                      adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
 +                      adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
 +                      adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
 +                              le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
 +                      adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
 +                      adev->psp.ta_ras_ucode_version = le32_to_cpu(ta_hdr->ta_ras_ucode_version);
 +                      adev->psp.ta_ras_ucode_size = le32_to_cpu(ta_hdr->ta_ras_size_bytes);
 +                      adev->psp.ta_ras_start_addr = (uint8_t *)adev->psp.ta_xgmi_start_addr +
 +                              le32_to_cpu(ta_hdr->ta_ras_offset_bytes);
 +              }
 +              break;
 +      case CHIP_NAVI10:
 +              break;
 +      default:
 +              BUG();
        }
  
        return 0;
@@@ -532,24 -501,14 +534,24 @@@ psp_v11_0_sram_map(struct amdgpu_devic
  
        case AMDGPU_UCODE_ID_RLC_G:
                *sram_offset = 0x2000;
 -              *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
 -              *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
 +              if (adev->asic_type != CHIP_NAVI10) {
 +                      *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
 +                      *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
 +              } else {
 +                      *sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmRLC_GPM_UCODE_ADDR_NV10;
 +                      *sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmRLC_GPM_UCODE_DATA_NV10;
 +              }
                break;
  
        case AMDGPU_UCODE_ID_SDMA0:
                *sram_offset = 0x0;
 -              *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
 -              *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
 +              if (adev->asic_type != CHIP_NAVI10) {
 +                      *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
 +                      *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
 +              } else {
 +                      *sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_ADDR_NV10;
 +                      *sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_DATA_NV10;
 +              }
                break;
  
  /* TODO: needs confirmation */
@@@ -813,11 -772,6 +815,11 @@@ static int psp_v11_0_ras_cure_posion(st
  #endif
  }
  
 +static int psp_v11_0_rlc_autoload_start(struct psp_context *psp)
 +{
 +      return psp_rlc_autoload_start(psp);
 +}
 +
  static const struct psp_funcs psp_v11_0_funcs = {
        .init_microcode = psp_v11_0_init_microcode,
        .bootloader_load_sysdrv = psp_v11_0_bootloader_load_sysdrv,
        .support_vmr_ring = psp_v11_0_support_vmr_ring,
        .ras_trigger_error = psp_v11_0_ras_trigger_error,
        .ras_cure_posion = psp_v11_0_ras_cure_posion,
 +      .rlc_autoload_start = psp_v11_0_rlc_autoload_start,
  };
  
  void psp_v11_0_set_psp_funcs(struct psp_context *psp)
   *
   */
  
+ #include <linux/delay.h>
  #include <linux/firmware.h>
- #include <drm/drmP.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
  #include "amdgpu.h"
  #include "amdgpu_ucode.h"
  #include "amdgpu_trace.h"
@@@ -1209,7 -1212,7 +1212,7 @@@ static int sdma_v4_0_ring_test_ring(str
                tmp = le32_to_cpu(adev->wb.wb[index]);
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -1566,7 -1569,9 +1569,7 @@@ static int sdma_v4_0_late_init(void *ha
        if (r)
                goto interrupt;
  
 -      r = amdgpu_ras_debugfs_create(adev, &fs_info);
 -      if (r)
 -              goto debugfs;
 +      amdgpu_ras_debugfs_create(adev, &fs_info);
  
        r = amdgpu_ras_sysfs_create(adev, &fs_info);
        if (r)
@@@ -1587,6 -1592,7 +1590,6 @@@ irq
        amdgpu_ras_sysfs_remove(adev, *ras_if);
  sysfs:
        amdgpu_ras_debugfs_remove(adev, *ras_if);
 -debugfs:
        amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
  interrupt:
        amdgpu_ras_feature_enable(adev, *ras_if, 0);
@@@ -23,7 -23,8 +23,8 @@@
  #include <linux/firmware.h>
  #include <linux/slab.h>
  #include <linux/module.h>
- #include <drm/drmP.h>
+ #include <linux/pci.h>
  #include "amdgpu.h"
  #include "amdgpu_atombios.h"
  #include "amdgpu_ih.h"
@@@ -274,6 -275,15 +275,6 @@@ static bool soc15_read_bios_from_rom(st
        return true;
  }
  
 -struct soc15_allowed_register_entry {
 -      uint32_t hwip;
 -      uint32_t inst;
 -      uint32_t seg;
 -      uint32_t reg_offset;
 -      bool grbm_indexed;
 -};
 -
 -
  static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
@@@ -378,7 -388,7 +379,7 @@@ void soc15_program_register_sequence(st
                } else {
                        tmp = RREG32(reg);
                        tmp &= ~(entry->and_mask);
 -                      tmp |= entry->or_mask;
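 +                      /* restrict or_mask to the bits covered by and_mask */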
 +                      tmp |= (entry->or_mask & entry->and_mask);
                }
  
                if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
@@@ -1025,8 -1035,6 +1026,8 @@@ static int soc15_common_sw_init(void *h
        if (amdgpu_sriov_vf(adev))
                xgpu_ai_mailbox_add_irq_id(adev);
  
 +      adev->df_funcs->sw_init(adev);
 +
        return 0;
  }
  
@@@ -1073,7 -1081,6 +1074,7 @@@ static int soc15_common_hw_init(void *h
         */
        if (adev->nbio_funcs->remap_hdp_registers)
                adev->nbio_funcs->remap_hdp_registers(adev);
 +
        /* enable the doorbell aperture */
        soc15_enable_doorbell_aperture(adev, true);
        /* HW doorbell routing policy: doorbell writing not
@@@ -22,7 -22,7 +22,7 @@@
   */
  
  #include <linux/firmware.h>
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_vcn.h"
  #include "soc15.h"
@@@ -128,17 -128,6 +128,17 @@@ static int vcn_v1_0_sw_init(void *handl
        if (r)
                return r;
  
 +      adev->vcn.internal.scratch9 = adev->vcn.external.scratch9 =
 +              SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
 +      adev->vcn.internal.data0 = adev->vcn.external.data0 =
 +              SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
 +      adev->vcn.internal.data1 = adev->vcn.external.data1 =
 +              SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
 +      adev->vcn.internal.cmd = adev->vcn.external.cmd =
 +              SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
 +      adev->vcn.internal.nop = adev->vcn.external.nop =
 +              SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
 +
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                ring = &adev->vcn.ring_enc[i];
                sprintf(ring->name, "vcn_enc%d", i);
                return r;
  
        adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
 +      adev->vcn.internal.jpeg_pitch = adev->vcn.external.jpeg_pitch =
 +              SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH);
  
        return 0;
  }
  #include <linux/version.h>
  #include <linux/types.h>
  #include <linux/pm_runtime.h>
+ #include <linux/pci.h>
  #include <linux/firmware.h>
  
- #include <drm/drmP.h>
  #include <drm/drm_atomic.h>
  #include <drm/drm_atomic_uapi.h>
  #include <drm/drm_atomic_helper.h>
  #include <drm/drm_dp_mst_helper.h>
  #include <drm/drm_fb_helper.h>
+ #include <drm/drm_fourcc.h>
  #include <drm/drm_edid.h>
+ #include <drm/drm_vblank.h>
  
  #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
 -#include "ivsrcid/irqsrcs_dcn_1_0.h"
 +#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
  
  #include "dcn/dcn_1_0_offset.h"
  #include "dcn/dcn_1_0_sh_mask.h"
@@@ -558,10 -560,6 +560,10 @@@ static int amdgpu_dm_init(struct amdgpu
  
        init_data.flags.power_down_display_on_boot = true;
  
 +#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 +      init_data.soc_bounding_box = adev->dm.soc_bounding_box;
 +#endif
 +
        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);
  
@@@ -667,7 -665,6 +669,7 @@@ static int load_dmcu_fw(struct amdgpu_d
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
 +      case CHIP_NAVI10:
                return 0;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id))
@@@ -782,7 -779,7 +784,7 @@@ static int dm_late_init(void *handle
        unsigned int linear_lut[16];
        int i;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
 -      bool ret;
 +      bool ret = false;
  
        for (i = 0; i < 16; i++)
                linear_lut[i] = 0xFFFF * i / 15;
        params.backlight_lut_array_size = 16;
        params.backlight_lut_array = linear_lut;
  
 -      ret = dmcu_load_iram(dmcu, params);
 +      /* TODO: enable this for navi10 */
 +      if (adev->asic_type <= CHIP_RAVEN) {
 +              ret = dmcu_load_iram(dmcu, params);
  
 -      if (!ret)
 -              return -EINVAL;
 +              if (!ret)
 +                      return -EINVAL;
 +      }
  
        return detect_mst_link_for_all_connectors(adev->ddev);
  }
@@@ -2215,9 -2209,6 +2217,9 @@@ static int amdgpu_dm_initialize_drm_dev
                break;
  #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
        case CHIP_RAVEN:
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +      case CHIP_NAVI10:
 +#endif
                if (dcn10_register_irq_handlers(dm->adev)) {
                        DRM_ERROR("DM: Failed to initialize IRQ\n");
                        goto fail;
@@@ -2371,13 -2362,6 +2373,13 @@@ static int dm_early_init(void *handle
                adev->mode_info.num_dig = 4;
                break;
  #endif
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +      case CHIP_NAVI10:
 +              adev->mode_info.num_crtc = 6;
 +              adev->mode_info.num_hpd = 6;
 +              adev->mode_info.num_dig = 6;
 +              break;
 +#endif
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;
@@@ -2670,9 -2654,6 +2672,9 @@@ fill_plane_buffer_attributes(struct amd
        if (adev->asic_type == CHIP_VEGA10 ||
            adev->asic_type == CHIP_VEGA12 ||
            adev->asic_type == CHIP_VEGA20 ||
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +          adev->asic_type == CHIP_NAVI10 ||
 +#endif
            adev->asic_type == CHIP_RAVEN) {
                /* Fill GFX9 params */
                tiling_info->gfx9.num_pipes =
@@@ -2878,7 -2859,6 +2880,7 @@@ static int fill_dc_plane_attributes(str
                                    struct drm_plane_state *plane_state,
                                    struct drm_crtc_state *crtc_state)
  {
 +      struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
        const struct amdgpu_framebuffer *amdgpu_fb =
                to_amdgpu_framebuffer(plane_state->fb);
        struct dc_scaling_info scaling_info;
         * Always set input transfer function, since plane state is refreshed
         * every time.
         */
 -      ret = amdgpu_dm_set_degamma_lut(crtc_state, dc_plane_state);
 -      if (ret) {
 -              dc_transfer_func_release(dc_plane_state->in_transfer_func);
 -              dc_plane_state->in_transfer_func = NULL;
 -      }
 +      ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
 +      if (ret)
 +              return ret;
  
 -      return ret;
 +      return 0;
  }
  
  static void update_stream_scaling_settings(const struct drm_display_mode *mode,
@@@ -2991,9 -2973,6 +2993,9 @@@ convert_color_depth_from_display_info(c
  {
        uint32_t bpc = connector->display_info.bpc;
  
 +      if (!state)
 +              state = connector->state;
 +
        if (state) {
                bpc = state->max_bpc;
                /* Round down to the nearest even number. */
@@@ -3420,20 -3399,6 +3422,20 @@@ create_stream_for_sink(struct amdgpu_dm
                fill_stream_properties_from_drm_display_mode(stream,
                        &mode, &aconnector->base, con_state, old_stream);
  
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      /* TODO: enable DSC once validated:
 +       *
 +       * stream->timing.flags.DSC = 0;
 +       *
 +       * if (aconnector->dc_link &&
 +       *     aconnector->dc_link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
 +       *     aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.is_dsc_supported)
 +       *      if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc,
 +       *                      &aconnector->dc_link->dpcd_caps.dsc_caps,
 +       *                      dc_link_bandwidth_kbps(aconnector->dc_link, dc_link_get_link_cap(aconnector->dc_link)),
 +       *                      &stream->timing,
 +       *                      &stream->timing.dsc_cfg))
 +       *              stream->timing.flags.DSC = 1;
 +       */
 +#endif
 +
        update_stream_scaling_settings(&mode, dm_state, stream);
  
        fill_audio_info(
@@@ -3516,8 -3481,6 +3518,8 @@@ dm_crtc_duplicate_state(struct drm_crt
        state->vrr_supported = cur->vrr_supported;
        state->freesync_config = cur->freesync_config;
        state->crc_enabled = cur->crc_enabled;
 +      state->cm_has_degamma = cur->cm_has_degamma;
 +      state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
  
        /* TODO Duplicate dc_stream after objects are stream object is flattened */
  
@@@ -3772,10 -3735,6 +3774,10 @@@ void amdgpu_dm_connector_funcs_reset(st
                state->underscan_enable = false;
                state->underscan_hborder = 0;
                state->underscan_vborder = 0;
 +              state->base.max_requested_bpc = 8;
 +
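 +              /* ABM is only applicable to eDP panels */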
 +              if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
 +                      state->abm_level = amdgpu_dm_abm_level;
  
                __drm_atomic_helper_connector_reset(connector, &state->base);
        }
@@@ -4009,9 -3968,10 +4011,10 @@@ is_hdr_metadata_different(const struct 
  
  static int
  amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
-                                struct drm_connector_state *new_con_state)
+                                struct drm_atomic_state *state)
  {
-       struct drm_atomic_state *state = new_con_state->state;
+       struct drm_connector_state *new_con_state =
+               drm_atomic_get_new_connector_state(state, conn);
        struct drm_connector_state *old_con_state =
                drm_atomic_get_old_connector_state(state, conn);
        struct drm_crtc *crtc = new_con_state->crtc;
@@@ -4416,8 -4376,7 +4419,7 @@@ static void dm_plane_atomic_async_updat
        struct drm_plane_state *old_state =
                drm_atomic_get_old_plane_state(new_state->state, plane);
  
-       if (plane->state->fb != new_state->fb)
-               drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
+       swap(plane->state->fb, new_state->fb);
  
        plane->state->src_x = new_state->src_x;
        plane->state->src_y = new_state->src_y;
@@@ -4821,13 -4780,6 +4823,13 @@@ void amdgpu_dm_connector_init_helper(st
  {
        struct amdgpu_device *adev = dm->ddev->dev_private;
  
 +      /*
 +       * Some of the properties below require access to state, like bpc.
 +       * Allocate some default initial connector state with our reset helper.
 +       */
 +      if (aconnector->base.funcs->reset)
 +              aconnector->base.funcs->reset(&aconnector->base);
 +
        aconnector->connector_id = link_index;
        aconnector->dc_link = link;
        aconnector->base.interlace_allowed = false;
@@@ -5017,6 -4969,9 +5019,6 @@@ static int amdgpu_dm_connector_init(str
                        &aconnector->base,
                        &amdgpu_dm_connector_helper_funcs);
  
 -      if (aconnector->base.funcs->reset)
 -              aconnector->base.funcs->reset(&aconnector->base);
 -
        amdgpu_dm_connector_init_helper(
                dm,
                aconnector,
  
        drm_connector_register(&aconnector->base);
  #if defined(CONFIG_DEBUG_FS)
 -      res = connector_debugfs_init(aconnector);
 -      if (res) {
 -              DRM_ERROR("Failed to create debugfs for connector");
 -              goto out_free;
 -      }
 +      connector_debugfs_init(aconnector);
        aconnector->debugfs_dpcd_address = 0;
        aconnector->debugfs_dpcd_size = 0;
  #endif
@@@ -5675,18 -5634,8 +5677,18 @@@ static void amdgpu_dm_commit_planes(str
                        bundle->stream_update.dst = acrtc_state->stream->dst;
                }
  
 -              if (new_pcrtc_state->color_mgmt_changed)
 -                      bundle->stream_update.out_transfer_func = acrtc_state->stream->out_transfer_func;
 +              if (new_pcrtc_state->color_mgmt_changed) {
 +                      /*
 +                       * TODO: This isn't fully correct since we've actually
 +                       * already modified the stream in place.
 +                       */
 +                      bundle->stream_update.gamut_remap =
 +                              &acrtc_state->stream->gamut_remap_matrix;
 +                      bundle->stream_update.output_csc_transform =
 +                              &acrtc_state->stream->csc_color_matrix;
 +                      bundle->stream_update.out_transfer_func =
 +                              acrtc_state->stream->out_transfer_func;
 +              }
  
                acrtc_state->stream->abm_level = acrtc_state->abm_level;
                if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
@@@ -6397,17 -6346,7 +6399,17 @@@ static int dm_update_crtc_state(struct 
                if (ret)
                        goto fail;
  
 -              if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
 +              /*
 +               * If we already removed the old stream from the context
 +               * (and set the new stream to NULL) then we can't reuse
 +               * the old stream even if the stream and scaling are unchanged.
 +               * We'll hit the BUG_ON and black screen.
 +               *
 +               * TODO: Refactor this function to allow this check to work
 +               * in all conditions.
 +               */
 +              if (dm_new_crtc_state->stream &&
 +                  dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
                    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
                        new_crtc_state->mode_changed = false;
                        DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
@@@ -6536,9 -6475,10 +6538,9 @@@ skip_modeset
         */
        if (dm_new_crtc_state->base.color_mgmt_changed ||
            drm_atomic_crtc_needs_modeset(new_crtc_state)) {
 -              ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
 +              ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
                if (ret)
                        goto fail;
 -              amdgpu_dm_set_ctm(dm_new_crtc_state);
        }
  
        /* Update Freesync settings. */
@@@ -6841,8 -6781,6 +6843,8 @@@ dm_determine_update_type_for_commit(str
                                                new_dm_plane_state->dc_state->in_transfer_func;
                                stream_update.gamut_remap =
                                                &new_dm_crtc_state->stream->gamut_remap_matrix;
 +                              stream_update.output_csc_transform =
 +                                              &new_dm_crtc_state->stream->csc_color_matrix;
                                stream_update.out_transfer_func =
                                                new_dm_crtc_state->stream->out_transfer_func;
                        }
  #ifndef __AMDGPU_DM_H__
  #define __AMDGPU_DM_H__
  
- #include <drm/drmP.h>
  #include <drm/drm_atomic.h>
+ #include <drm/drm_connector.h>
+ #include <drm/drm_crtc.h>
+ #include <drm/drm_dp_mst_helper.h>
+ #include <drm/drm_plane.h>
  
  /*
   * This file contains the definition for amdgpu_display_manager
@@@ -206,13 -209,6 +209,13 @@@ struct amdgpu_display_manager 
  
        const struct firmware *fw_dmcu;
        uint32_t dmcu_fw_version;
 +#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 +      /**
 +       * gpu_info FW provided soc bounding box struct or 0 if not
 +       * available in FW
 +       */
 +      const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
 +#endif
  };
  
  struct amdgpu_dm_connector {
@@@ -278,9 -274,6 +281,9 @@@ struct dm_crtc_state 
        struct drm_crtc_state base;
        struct dc_stream_state *stream;
  
 +      bool cm_has_degamma;
 +      bool cm_is_degamma_srgb;
 +
        int active_planes;
        bool interrupts_enabled;
  
@@@ -370,9 -363,10 +373,9 @@@ void amdgpu_dm_crtc_handle_crc_irq(stru
  #define MAX_COLOR_LEGACY_LUT_ENTRIES 256
  
  void amdgpu_dm_init_color_mod(void);
 -int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
 -                            struct dc_plane_state *dc_plane_state);
 -void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc);
 -int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc);
 +int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
 +int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
 +                                    struct dc_plane_state *dc_plane_state);
  
  extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
  
@@@ -23,7 -23,9 +23,9 @@@
   *
   */
  
- #include <linux/debugfs.h>
+ #include <linux/uaccess.h>
+ #include <drm/drm_debugfs.h>
  
  #include "dc.h"
  #include "amdgpu.h"
@@@ -673,71 -675,6 +675,71 @@@ static ssize_t dp_phy_test_pattern_debu
  }
  
  /*
 + * Returns the current and maximum output bpc for the connector.
 + * Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc
 + */
 +static int output_bpc_show(struct seq_file *m, void *data)
 +{
 +      struct drm_connector *connector = m->private;
 +      struct drm_device *dev = connector->dev;
 +      struct drm_crtc *crtc = NULL;
 +      struct dm_crtc_state *dm_crtc_state = NULL;
 +      int res = -ENODEV;
 +      unsigned int bpc;
 +
 +      mutex_lock(&dev->mode_config.mutex);
 +      drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 +
 +      if (connector->state == NULL)
 +              goto unlock;
 +
 +      crtc = connector->state->crtc;
 +      if (crtc == NULL)
 +              goto unlock;
 +
 +      drm_modeset_lock(&crtc->mutex, NULL);
 +      if (crtc->state == NULL)
 +              goto unlock;
 +
 +      dm_crtc_state = to_dm_crtc_state(crtc->state);
 +      if (dm_crtc_state->stream == NULL)
 +              goto unlock;
 +
 +      switch (dm_crtc_state->stream->timing.display_color_depth) {
 +      case COLOR_DEPTH_666:
 +              bpc = 6;
 +              break;
 +      case COLOR_DEPTH_888:
 +              bpc = 8;
 +              break;
 +      case COLOR_DEPTH_101010:
 +              bpc = 10;
 +              break;
 +      case COLOR_DEPTH_121212:
 +              bpc = 12;
 +              break;
 +      case COLOR_DEPTH_161616:
 +              bpc = 16;
 +              break;
 +      default:
 +              goto unlock;
 +      }
 +
 +      seq_printf(m, "Current: %u\n", bpc);
 +      seq_printf(m, "Maximum: %u\n", connector->display_info.bpc);
 +      res = 0;
 +
 +unlock:
 +      if (crtc)
 +              drm_modeset_unlock(&crtc->mutex);
 +
 +      drm_modeset_unlock(&dev->mode_config.connection_mutex);
 +      mutex_unlock(&dev->mode_config.mutex);
 +
 +      return res;
 +}
 +
 +/*
   * Returns the min and max vrr vfreq through the connector's debugfs file.
   * Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range
   */
@@@ -795,6 -732,8 +797,6 @@@ static ssize_t dp_sdp_message_debugfs_w
        return write_size;
  }
  
 -DEFINE_SHOW_ATTRIBUTE(vrr_range);
 -
  static ssize_t dp_dpcd_address_write(struct file *f, const char __user *buf,
                                 size_t size, loff_t *pos)
  {
@@@ -877,9 -816,6 +879,9 @@@ static ssize_t dp_dpcd_data_read(struc
        return read_size - r;
  }
  
 +DEFINE_SHOW_ATTRIBUTE(output_bpc);
 +DEFINE_SHOW_ATTRIBUTE(vrr_range);
 +
  static const struct file_operations dp_link_settings_debugfs_fops = {
        .owner = THIS_MODULE,
        .read = dp_link_settings_read,
@@@ -932,7 -868,6 +934,7 @@@ static const struct 
                {"link_settings", &dp_link_settings_debugfs_fops},
                {"phy_settings", &dp_phy_settings_debugfs_fop},
                {"test_pattern", &dp_phy_test_pattern_fops},
 +              {"output_bpc", &output_bpc_fops},
                {"vrr_range", &vrr_range_fops},
                {"sdp_message", &sdp_message_fops},
                {"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
                {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}
  };
  
 -int connector_debugfs_init(struct amdgpu_dm_connector *connector)
 +void connector_debugfs_init(struct amdgpu_dm_connector *connector)
  {
        int i;
 -      struct dentry *ent, *dir = connector->base.debugfs_entry;
 +      struct dentry *dir = connector->base.debugfs_entry;
  
        if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
            connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
                for (i = 0; i < ARRAY_SIZE(dp_debugfs_entries); i++) {
 -                      ent = debugfs_create_file(dp_debugfs_entries[i].name,
 -                                                0644,
 -                                                dir,
 -                                                connector,
 -                                                dp_debugfs_entries[i].fops);
 -                      if (IS_ERR(ent))
 -                              return PTR_ERR(ent);
 +                      debugfs_create_file(dp_debugfs_entries[i].name,
 +                                          0644, dir, connector,
 +                                          dp_debugfs_entries[i].fops);
                }
        }
 -
 -      return 0;
  }
  
  /*
@@@ -1095,7 -1036,7 +1097,7 @@@ int dtn_debugfs_init(struct amdgpu_devi
        };
  
        struct drm_minor *minor = adev->ddev->primary;
 -      struct dentry *ent, *root = minor->debugfs_root;
 +      struct dentry *root = minor->debugfs_root;
        int ret;
  
        ret = amdgpu_debugfs_add_files(adev, amdgpu_dm_debugfs_list,
        if (ret)
                return ret;
  
 -      ent = debugfs_create_file(
 -              "amdgpu_dm_dtn_log",
 -              0644,
 -              root,
 -              adev,
 -              &dtn_log_fops);
 -
 -      if (IS_ERR(ent))
 -              return PTR_ERR(ent);
 +      debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev,
 +                          &dtn_log_fops);
  
 -      ent = debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root,
 -                                       adev, &visual_confirm_fops);
 -      if (IS_ERR(ent))
 -              return PTR_ERR(ent);
 +      debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev,
 +                                 &visual_confirm_fops);
  
        return 0;
  }
@@@ -28,7 -28,6 +28,6 @@@
  #include <linux/version.h>
  #include <linux/i2c.h>
  
- #include <drm/drmP.h>
  #include <drm/drm_probe_helper.h>
  #include <drm/amdgpu_drm.h>
  #include <drm/drm_edid.h>
@@@ -542,16 -541,6 +541,16 @@@ bool dm_helpers_submit_i2c
  
        return result;
  }
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +bool dm_helpers_dp_write_dsc_enable(
 +              struct dc_context *ctx,
 +              const struct dc_stream_state *stream,
 +              bool enable
 +)
 +{
 +      return false;
 +}
 +#endif
  
  bool dm_helpers_is_dp_sink_present(struct dc_link *link)
  {
@@@ -23,8 -23,6 +23,6 @@@
   *
   */
  
- #include <drm/drmP.h>
  #include "dm_services_types.h"
  #include "dc.h"
  
@@@ -279,6 -277,8 +277,6 @@@ void *amdgpu_dm_irq_register_interrupt(
                return DAL_INVALID_IRQ_HANDLER_IDX;
        }
  
 -      memset(handler_data, 0, sizeof(*handler_data));
 -
        init_handler_common_data(handler_data, ih, handler_args, &adev->dm);
  
        irq_source = int_params->irq_source;
@@@ -24,7 -24,6 +24,6 @@@
  #include <linux/string.h>
  #include <linux/acpi.h>
  
- #include <drm/drmP.h>
  #include <drm/drm_probe_helper.h>
  #include <drm/amdgpu_drm.h>
  #include "dm_services.h"
@@@ -149,23 -148,6 +148,23 @@@ static void get_default_clock_levels
        }
  }
  
 +static enum smu_clk_type dc_to_smu_clock_type(
 +              enum dm_pp_clock_type dm_pp_clk_type)
 +{
 +#define DCCLK_MAP_SMUCLK(dcclk, smuclk) \
 +      [dcclk] = smuclk
 +
 +      static int dc_clk_type_map[] = {
 +              DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_DISPLAY_CLK,  SMU_DISPCLK),
 +              DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_ENGINE_CLK,   SMU_GFXCLK),
 +              DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_MEMORY_CLK,   SMU_MCLK),
 +              DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_DCEFCLK,      SMU_DCEFCLK),
 +              DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_SOCCLK,       SMU_SOCCLK),
 +      };
 +
 +      return dc_clk_type_map[dm_pp_clk_type];
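 +      /* note: only the clock types listed above have valid entries; other
 +       * values index a zero-initialized or out-of-range slot */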
 +}
 +
  static enum amd_pp_clock_type dc_to_pp_clock_type(
                enum dm_pp_clock_type dm_pp_clk_type)
  {
@@@ -334,7 -316,7 +333,7 @@@ bool dm_pp_get_clock_levels_by_type
                }
        } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) {
                if (smu_get_clock_by_type(&adev->smu,
 -                                        dc_to_pp_clock_type(clk_type),
 +                                        dc_to_smu_clock_type(clk_type),
                                          &pp_clks)) {
                        get_default_clock_levels(clk_type, dc_clks);
                        return true;
@@@ -647,279 -629,16 +646,279 @@@ void pp_rv_set_hard_min_fclk_by_freq(st
        pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
  }
  
 +enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
 +              struct pp_smu_wm_range_sets *ranges)
 +{
 +      const struct dc_context *ctx = pp->dm;
 +      struct amdgpu_device *adev = ctx->driver_context;
 +      struct smu_context *smu = &adev->smu;
 +      struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
 +      struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
 +                      wm_with_clock_ranges.wm_dmif_clocks_ranges;
 +      struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
 +                      wm_with_clock_ranges.wm_mcif_clocks_ranges;
 +      int32_t i;
 +
 +      wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
 +      wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
 +
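 +      /* translate the DC reader (DMIF) watermark sets; instances above 3
 +       * fall back to set A */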
 +      for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
 +              if (ranges->reader_wm_sets[i].wm_inst > 3)
 +                      wm_dce_clocks[i].wm_set_id = WM_SET_A;
 +              else
 +                      wm_dce_clocks[i].wm_set_id =
 +                                      ranges->reader_wm_sets[i].wm_inst;
 +              wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
 +                      ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
 +              wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
 +                      ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
 +              wm_dce_clocks[i].wm_max_mem_clk_in_khz =
 +                      ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
 +              wm_dce_clocks[i].wm_min_mem_clk_in_khz =
 +                      ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
 +      }
 +
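 +      /* same translation for the writer (MCIF) watermark sets */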
 +      for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
 +              if (ranges->writer_wm_sets[i].wm_inst > 3)
 +                      wm_soc_clocks[i].wm_set_id = WM_SET_A;
 +              else
 +                      wm_soc_clocks[i].wm_set_id =
 +                                      ranges->writer_wm_sets[i].wm_inst;
 +              wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
 +                      ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
 +              wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
 +                      ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
 +              wm_soc_clocks[i].wm_max_mem_clk_in_khz =
 +                      ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
 +              wm_soc_clocks[i].wm_min_mem_clk_in_khz =
 +                      ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
 +      }
 +
 +      if (!smu->funcs)
 +              return PP_SMU_RESULT_UNSUPPORTED;
 +
 +      /* smu_set_watermarks_for_clock_ranges() returns 0 on success (or when
 +       * smu.funcs->set_watermarks_for_clock_ranges is NULL), nonzero on
 +       * failure
 +       */
 +      if (smu_set_watermarks_for_clock_ranges(&adev->smu,
 +                      &wm_with_clock_ranges))
 +              return PP_SMU_RESULT_FAIL;
 +
 +      return PP_SMU_RESULT_OK;
 +}
 +
 +enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp)
 +{
 +      const struct dc_context *ctx = pp->dm;
 +      struct amdgpu_device *adev = ctx->driver_context;
 +      struct smu_context *smu = &adev->smu;
 +
 +      if (!smu->funcs)
 +              return PP_SMU_RESULT_UNSUPPORTED;
 +
 +      /* smu_set_azalia_d3_pme() returns 0 on success (or when
 +       * smu.funcs->set_azalia_d3_pme is NULL), nonzero on failure */
 +      if (smu_set_azalia_d3_pme(smu))
 +              return PP_SMU_RESULT_FAIL;
 +
 +      return PP_SMU_RESULT_OK;
 +}
 +
 +enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
 +{
 +      const struct dc_context *ctx = pp->dm;
 +      struct amdgpu_device *adev = ctx->driver_context;
 +      struct smu_context *smu = &adev->smu;
 +
 +      if (!smu->funcs)
 +              return PP_SMU_RESULT_UNSUPPORTED;
 +
 +      /* smu_set_display_count() returns 0 on success (or when
 +       * smu.funcs->set_display_count is NULL), nonzero on failure */
 +      if (smu_set_display_count(smu, count))
 +              return PP_SMU_RESULT_FAIL;
 +
 +      return PP_SMU_RESULT_OK;
 +}
 +
 +enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
 +{
 +      const struct dc_context *ctx = pp->dm;
 +      struct amdgpu_device *adev = ctx->driver_context;
 +      struct smu_context *smu = &adev->smu;
 +
 +      if (!smu->funcs)
 +              return PP_SMU_RESULT_UNSUPPORTED;
 +
 +      /* smu_set_deep_sleep_dcefclk() returns 0 on success (or when
 +       * smu.funcs->set_deep_sleep_dcefclk is NULL), nonzero on failure */
 +      if (smu_set_deep_sleep_dcefclk(smu, mhz))
 +              return PP_SMU_RESULT_FAIL;
 +
 +      return PP_SMU_RESULT_OK;
 +}
 +
 +enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
 +              struct pp_smu *pp, int mhz)
 +{
 +      const struct dc_context *ctx = pp->dm;
 +      struct amdgpu_device *adev = ctx->driver_context;
 +      struct smu_context *smu = &adev->smu;
 +      struct pp_display_clock_request clock_req;
 +
 +      if (!smu->funcs)
 +              return PP_SMU_RESULT_UNSUPPORTED;
 +
 +      clock_req.clock_type = amd_pp_dcef_clock;
 +      clock_req.clock_freq_in_khz = mhz * 1000;
 +
 +      /* smu_display_clock_voltage_request() returns 0 on success (or when
 +       * smu.funcs->display_clock_voltage_request is NULL), nonzero on
 +       * failure
 +       */
 +      if (smu_display_clock_voltage_request(smu, &clock_req))
 +              return PP_SMU_RESULT_FAIL;
 +
 +      return PP_SMU_RESULT_OK;
 +}
 +
 +enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
 +{
 +      const struct dc_context *ctx = pp->dm;
 +      struct amdgpu_device *adev = ctx->driver_context;
 +      struct smu_context *smu = &adev->smu;
 +      struct pp_display_clock_request clock_req;
 +
 +      if (!smu->funcs)
 +              return PP_SMU_RESULT_UNSUPPORTED;
 +
 +      clock_req.clock_type = amd_pp_mem_clock;
 +      clock_req.clock_freq_in_khz = mhz * 1000;
 +
 +      /* smu_display_clock_voltage_request() returns 0 on success (or when
 +       * smu.funcs->display_clock_voltage_request is NULL), nonzero on
 +       * failure
 +       */
 +      if (smu_display_clock_voltage_request(smu, &clock_req))
 +              return PP_SMU_RESULT_FAIL;
 +
 +      return PP_SMU_RESULT_OK;
 +}
 +
 +enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
 +              enum pp_smu_nv_clock_id clock_id, int mhz)
 +{
 +      const struct dc_context *ctx = pp->dm;
 +      struct amdgpu_device *adev = ctx->driver_context;
 +      struct smu_context *smu = &adev->smu;
 +      struct pp_display_clock_request clock_req;
 +
 +      if (!smu->funcs)
 +              return PP_SMU_RESULT_UNSUPPORTED;
 +
 +      switch (clock_id) {
 +      case PP_SMU_NV_DISPCLK:
 +              clock_req.clock_type = amd_pp_disp_clock;
 +              break;
 +      case PP_SMU_NV_PHYCLK:
 +              clock_req.clock_type = amd_pp_phy_clock;
 +              break;
 +      case PP_SMU_NV_PIXELCLK:
 +              clock_req.clock_type = amd_pp_pixel_clock;
 +              break;
 +      default:
 +              /* don't issue a request with an uninitialized clock type */
 +              return PP_SMU_RESULT_FAIL;
 +      }
 +      clock_req.clock_freq_in_khz = mhz * 1000;
 +
 +      /* smu_display_clock_voltage_request() returns 0 on success (or when
 +       * smu.funcs->display_clock_voltage_request is NULL), nonzero on
 +       * failure
 +       */
 +      if (smu_display_clock_voltage_request(smu, &clock_req))
 +              return PP_SMU_RESULT_FAIL;
 +
 +      return PP_SMU_RESULT_OK;
 +}
 +
 +enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
 +              struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks)
 +{
 +      const struct dc_context *ctx = pp->dm;
 +      struct amdgpu_device *adev = ctx->driver_context;
 +      struct smu_context *smu = &adev->smu;
 +
 +      if (!smu->funcs)
 +              return PP_SMU_RESULT_UNSUPPORTED;
 +
 +      if (!smu->funcs->get_max_sustainable_clocks_by_dc)
 +              return PP_SMU_RESULT_UNSUPPORTED;
 +
 +      if (!smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks))
 +              return PP_SMU_RESULT_OK;
 +
 +      return PP_SMU_RESULT_FAIL;
 +}
 +
 +enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
 +              unsigned int *clock_values_in_khz, unsigned int *num_states)
 +{
 +      const struct dc_context *ctx = pp->dm;
 +      struct amdgpu_device *adev = ctx->driver_context;
 +      struct smu_context *smu = &adev->smu;
 +
 +      if (!smu->ppt_funcs)
 +              return PP_SMU_RESULT_UNSUPPORTED;
 +
 +      if (!smu->ppt_funcs->get_uclk_dpm_states)
 +              return PP_SMU_RESULT_UNSUPPORTED;
 +
 +      if (!smu->ppt_funcs->get_uclk_dpm_states(smu,
 +                      clock_values_in_khz, num_states))
 +              return PP_SMU_RESULT_OK;
 +
 +      return PP_SMU_RESULT_FAIL;
 +}
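
/* Editor's note: the pp_nv_* wrappers above all share one shape: report
 * PP_SMU_RESULT_UNSUPPORTED when the SMU function table is absent, then
 * fold the smu_* helper's 0/non-zero return into OK/FAIL. A minimal sketch
 * of that pattern follows; smu_do_request() is a hypothetical stand-in for
 * any smu_* helper with those return conventions.
 */
enum pp_smu_status pp_nv_wrapper_sketch(struct pp_smu *pp, int arg)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;

	if (!smu->funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	/* 0 covers both success and a NULL backing callback */
	if (smu_do_request(smu, arg))	/* hypothetical helper */
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}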
 +
  void dm_pp_get_funcs(
                struct dc_context *ctx,
                struct pp_smu_funcs *funcs)
  {
 -      funcs->rv_funcs.pp_smu.dm = ctx;
 -      funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
 -      funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
 -      funcs->rv_funcs.set_display_count = pp_rv_set_active_display_count;
 -      funcs->rv_funcs.set_min_deep_sleep_dcfclk = pp_rv_set_min_deep_sleep_dcfclk;
 -      funcs->rv_funcs.set_hard_min_dcfclk_by_freq = pp_rv_set_hard_min_dcefclk_by_freq;
 -      funcs->rv_funcs.set_hard_min_fclk_by_freq = pp_rv_set_hard_min_fclk_by_freq;
 +      switch (ctx->dce_version) {
 +      case DCN_VERSION_1_0:
 +      case DCN_VERSION_1_01:
 +              funcs->ctx.ver = PP_SMU_VER_RV;
 +              funcs->rv_funcs.pp_smu.dm = ctx;
 +              funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
 +              funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
 +              funcs->rv_funcs.set_display_count =
 +                              pp_rv_set_active_display_count;
 +              funcs->rv_funcs.set_min_deep_sleep_dcfclk =
 +                              pp_rv_set_min_deep_sleep_dcfclk;
 +              funcs->rv_funcs.set_hard_min_dcfclk_by_freq =
 +                              pp_rv_set_hard_min_dcefclk_by_freq;
 +              funcs->rv_funcs.set_hard_min_fclk_by_freq =
 +                              pp_rv_set_hard_min_fclk_by_freq;
 +              break;
 +#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 +      case DCN_VERSION_2_0:
 +              funcs->ctx.ver = PP_SMU_VER_NV;
 +              funcs->nv_funcs.pp_smu.dm = ctx;
 +              funcs->nv_funcs.set_display_count = pp_nv_set_display_count;
 +              funcs->nv_funcs.set_hard_min_dcfclk_by_freq =
 +                              pp_nv_set_hard_min_dcefclk_by_freq;
 +              funcs->nv_funcs.set_min_deep_sleep_dcfclk =
 +                              pp_nv_set_min_deep_sleep_dcfclk;
 +              funcs->nv_funcs.set_voltage_by_freq =
 +                              pp_nv_set_voltage_by_freq;
 +              funcs->nv_funcs.set_wm_ranges = pp_nv_set_wm_ranges;
 +
 +              /* TODO: set_pme_wa_enable causes a 4k@60Hz display to not light up */
 +              funcs->nv_funcs.set_pme_wa_enable = NULL;
 +              /* TODO: debug warning message */
 +              funcs->nv_funcs.set_hard_min_uclk_by_freq = NULL;
 +              /* TODO: compare data with the Windows driver */
 +              funcs->nv_funcs.get_maximum_sustainable_clocks = NULL;
 +              /* TODO: compare data with the Windows driver */
 +              funcs->nv_funcs.get_uclk_dpm_states = NULL;
 +              break;
 +#endif
 +      default:
 +              DRM_ERROR("smu version is not supported !\n");
 +              break;
 +      }
  }
 -
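
/* Editor's note: consumers are expected to key off funcs->ctx.ver before
 * touching either function table. A hedged caller-side sketch, illustrative
 * only and not part of this patch:
 */
static void example_set_display_count(struct pp_smu_funcs *funcs, int count)
{
	if (funcs->ctx.ver == PP_SMU_VER_RV &&
	    funcs->rv_funcs.set_display_count)
		funcs->rv_funcs.set_display_count(&funcs->rv_funcs.pp_smu,
						  count);
	else if (funcs->ctx.ver == PP_SMU_VER_NV &&
		 funcs->nv_funcs.set_display_count)
		funcs->nv_funcs.set_display_count(&funcs->nv_funcs.pp_smu,
						  count);
}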
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  
  #include "ObjectID.h"
@@@ -1400,10 -1402,6 +1402,10 @@@ static enum bp_result get_integrated_in
        info->ma_channel_number = info_v11->umachannelnumber;
        info->lvds_ss_percentage =
        le16_to_cpu(info_v11->lvds_ss_percentage);
 +#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 +      info->dp_ss_control =
 +      le16_to_cpu(info_v11->reserved1);
 +#endif
        info->lvds_sspread_rate_in_10hz =
        le16_to_cpu(info_v11->lvds_ss_rate_10hz);
        info->hdmi_ss_percentage =
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dal_asic_id.h"
  #include "dc_types.h"
  #include "dccg.h"
@@@ -34,7 -36,6 +36,7 @@@
  #include "dce120/dce120_clk_mgr.h"
  #include "dcn10/rv1_clk_mgr.h"
  #include "dcn10/rv2_clk_mgr.h"
 +#include "dcn20/dcn20_clk_mgr.h"
  
  
  int clk_mgr_helper_get_active_display_cnt(
@@@ -118,12 -119,6 +120,12 @@@ struct clk_mgr *dc_clk_mgr_create(struc
                break;
  #endif        /* Family RV */
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +      case FAMILY_NV:
 +              dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
 +              break;
 +#endif /* Family NV */
 +
        default:
                ASSERT(0); /* Unknown Asic */
                break;
@@@ -23,6 -23,9 +23,9 @@@
   *
   */
  
+ #include <linux/slab.h>
+ #include "reg_helper.h"
  #include "core_types.h"
  #include "clk_mgr_internal.h"
  #include "rv1_clk_mgr.h"
@@@ -215,23 -218,9 +218,23 @@@ static void rv1_update_clocks(struct cl
        }
  }
  
 +static void rv1_enable_pme_wa(struct clk_mgr *clk_mgr_base)
 +{
 +      struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
 +      struct pp_smu_funcs_rv *pp_smu = NULL;
 +
 +      if (clk_mgr->pp_smu) {
 +              pp_smu = &clk_mgr->pp_smu->rv_funcs;
 +
 +              if (pp_smu->set_pme_wa_enable)
 +                      pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
 +      }
 +}
 +
  static struct clk_mgr_funcs rv1_clk_funcs = {
        .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
        .update_clocks = rv1_update_clocks,
 +      .enable_pme_wa = rv1_enable_pme_wa,
  };
  
  static struct clk_mgr_internal_funcs rv1_clk_internal_funcs = {
@@@ -22,6 -22,8 +22,8 @@@
   * Authors: AMD
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  
  #include "dc.h"
  
  #include "dc_link_dp.h"
  
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +#include "dsc.h"
 +#endif
 +
 +#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 +#include "vm_helper.h"
 +#endif
 +
  #include "dce/dce_i2c.h"
  
  #define DC_LOGGER \
@@@ -465,7 -459,7 +467,7 @@@ bool dc_stream_program_csc_matrix(struc
                                        pipes,
                                        stream->output_color_space,
                                        stream->csc_color_matrix.matrix,
 -                                      pipes->plane_res.hubp ? pipes->plane_res.hubp->opp_id : 0);
 +                                      pipes->stream_res.opp->inst);
                        ret = true;
                }
        }
@@@ -537,11 -531,6 +539,11 @@@ static void destruct(struct dc *dc
        dc->dcn_ip = NULL;
  
  #endif
 +#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 +      kfree(dc->vm_helper);
 +      dc->vm_helper = NULL;
 +
 +#endif
  }
  
  static bool construct(struct dc *dc,
        enum dce_version dc_version = DCE_VERSION_UNKNOWN;
        dc->config = init_params->flags;
  
 +#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 +      // Allocate memory for the vm_helper
 +      dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
 +
 +#endif
        memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
  
        dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
        }
  
        dc->dcn_ip = dcn_ip;
 +#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 +      dc->soc_bounding_box = init_params->soc_bounding_box;
 +#endif
  #endif
  
        dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
@@@ -695,21 -676,6 +697,21 @@@ fail
        return false;
  }
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +static bool disable_all_writeback_pipes_for_stream(
 +              const struct dc *dc,
 +              struct dc_stream_state *stream,
 +              struct dc_state *context)
 +{
 +      int i;
 +
 +      for (i = 0; i < stream->num_wb_info; i++)
 +              stream->writeback_info[i].wb_enabled = false;
 +
 +      return true;
 +}
 +#endif
 +
  static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
  {
        int i, j;
                }
                if (should_disable && old_stream) {
                        dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +                      disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
 +#endif
                        dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
                }
        }
@@@ -1407,9 -1370,6 +1409,9 @@@ static enum surface_update_type det_sur
  
        update_flags->raw = 0; // Reset all flags
  
 +      if (u->flip_addr)
 +              update_flags->bits.addr_update = 1;
 +
        if (!is_surface_in_context(context, u->surface)) {
                update_flags->bits.new_plane = 1;
                return UPDATE_TYPE_FULL;
@@@ -1496,11 -1456,6 +1498,11 @@@ static enum surface_update_type check_u
  
                if (stream_update->dpms_off)
                        return UPDATE_TYPE_FULL;
 +
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +              if (stream_update->wb_update)
 +                      return UPDATE_TYPE_FULL;
 +#endif
        }
  
        for (i = 0 ; i < surface_count; i++) {
@@@ -1645,26 -1600,6 +1647,26 @@@ static void copy_surface_update_to_plan
                        sizeof(struct dc_transfer_func_distributed_points));
        }
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +      if (srf_update->func_shaper &&
 +                      (surface->in_shaper_func !=
 +                      srf_update->func_shaper))
 +              memcpy(surface->in_shaper_func, srf_update->func_shaper,
 +              sizeof(*surface->in_shaper_func));
 +
 +      if (srf_update->lut3d_func &&
 +                      (surface->lut3d_func !=
 +                      srf_update->lut3d_func))
 +              memcpy(surface->lut3d_func, srf_update->lut3d_func,
 +              sizeof(*surface->lut3d_func));
 +
 +      if (srf_update->blend_tf &&
 +                      (surface->blend_tf !=
 +                      srf_update->blend_tf))
 +              memcpy(surface->blend_tf, srf_update->blend_tf,
 +              sizeof(*surface->blend_tf));
 +
 +#endif
        if (srf_update->input_csc_color_matrix)
                surface->input_csc_color_matrix =
                        *srf_update->input_csc_color_matrix;
                        *srf_update->coeff_reduction_factor;
  }
  
 +static void copy_stream_update_to_stream(struct dc *dc,
 +                                       struct dc_state *context,
 +                                       struct dc_stream_state *stream,
 +                                       const struct dc_stream_update *update)
 +{
 +      if (update == NULL || stream == NULL)
 +              return;
 +
 +      if (update->src.height && update->src.width)
 +              stream->src = update->src;
 +
 +      if (update->dst.height && update->dst.width)
 +              stream->dst = update->dst;
 +
 +      if (update->out_transfer_func &&
 +          stream->out_transfer_func != update->out_transfer_func) {
 +              stream->out_transfer_func->sdr_ref_white_level =
 +                      update->out_transfer_func->sdr_ref_white_level;
 +              stream->out_transfer_func->tf = update->out_transfer_func->tf;
 +              stream->out_transfer_func->type =
 +                      update->out_transfer_func->type;
 +              memcpy(&stream->out_transfer_func->tf_pts,
 +                     &update->out_transfer_func->tf_pts,
 +                     sizeof(struct dc_transfer_func_distributed_points));
 +      }
 +
 +      if (update->hdr_static_metadata)
 +              stream->hdr_static_metadata = *update->hdr_static_metadata;
 +
 +      if (update->abm_level)
 +              stream->abm_level = *update->abm_level;
 +
 +      if (update->periodic_interrupt0)
 +              stream->periodic_interrupt0 = *update->periodic_interrupt0;
 +
 +      if (update->periodic_interrupt1)
 +              stream->periodic_interrupt1 = *update->periodic_interrupt1;
 +
 +      if (update->gamut_remap)
 +              stream->gamut_remap_matrix = *update->gamut_remap;
 +
 +      /* Note: updating this after mode set is currently not a use case;
 +       * however, if it arises, the OCSC would need to be reprogrammed at
 +       * a minimum.
 +       */
 +      if (update->output_color_space)
 +              stream->output_color_space = *update->output_color_space;
 +
 +      if (update->output_csc_transform)
 +              stream->csc_color_matrix = *update->output_csc_transform;
 +
 +      if (update->vrr_infopacket)
 +              stream->vrr_infopacket = *update->vrr_infopacket;
 +
 +      if (update->dpms_off)
 +              stream->dpms_off = *update->dpms_off;
 +
 +      if (update->vsc_infopacket)
 +              stream->vsc_infopacket = *update->vsc_infopacket;
 +
 +      if (update->vsp_infopacket)
 +              stream->vsp_infopacket = *update->vsp_infopacket;
 +
 +      if (update->dither_option)
 +              stream->dither_option = *update->dither_option;
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +      /* update current stream with writeback info */
 +      if (update->wb_update) {
 +              int i;
 +
 +              stream->num_wb_info = update->wb_update->num_wb_info;
 +              ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
 +              for (i = 0; i < stream->num_wb_info; i++)
 +                      stream->writeback_info[i] =
 +                              update->wb_update->writeback_info[i];
 +      }
 +#endif
 +#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
 +      if (update->dsc_config) {
 +              struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
 +              uint32_t old_dsc_enabled = stream->timing.flags.DSC;
 +              uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
 +                                     update->dsc_config->num_slices_v != 0);
 +
 +              stream->timing.dsc_cfg = *update->dsc_config;
 +              stream->timing.flags.DSC = enable_dsc;
 +              if (!dc->res_pool->funcs->validate_bandwidth(dc, context,
 +                                                           true)) {
 +                      stream->timing.dsc_cfg = old_dsc_cfg;
 +                      stream->timing.flags.DSC = old_dsc_enabled;
 +              }
 +      }
 +#endif
 +}
 +
  static void commit_planes_do_stream_update(struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_stream_update *stream_update,
                                dc_stream_program_csc_matrix(dc, stream);
  
                        if (stream_update->dither_option) {
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +                              struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
 +#endif
                                resource_build_bit_depth_reduction_params(pipe_ctx->stream,
                                                                        &pipe_ctx->stream->bit_depth_params);
                                pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
                                                &stream->bit_depth_params,
                                                &stream->clamping);
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +                              if (odm_pipe)
 +                                      odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
 +                                                      &stream->bit_depth_params,
 +                                                      &stream->clamping);
 +#endif
                        }
  
 +#if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT)
 +                      if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
 +                              dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
 +                              dp_update_dsc_config(pipe_ctx);
 +                              dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
 +                      }
 +#endif
                        /* Full fe update*/
                        if (update_type == UPDATE_TYPE_FAST)
                                continue;
@@@ -1904,30 -1728,6 +1906,30 @@@ static void commit_planes_for_stream(st
                return;
        }
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +      if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
 +              for (i = 0; i < surface_count; i++) {
 +                      struct dc_plane_state *plane_state = srf_updates[i].surface;
 +                      /* set logical flag for lock/unlock use */
 +                      for (j = 0; j < dc->res_pool->pipe_count; j++) {
 +                              struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
 +                              if (!pipe_ctx->plane_state)
 +                                      continue;
 +                              if (pipe_ctx->plane_state != plane_state)
 +                                      continue;
 +                              plane_state->triplebuffer_flips = false;
 +                              if (update_type == UPDATE_TYPE_FAST &&
 +                                      dc->hwss.program_triplebuffer != NULL &&
 +                                      !plane_state->flip_immediate &&
 +                                      !dc->debug.disable_tri_buf) {
 +                                              /* triple buffer for VUpdate only */
 +                                              plane_state->triplebuffer_flips = true;
 +                              }
 +                      }
 +              }
 +      }
 +#endif
 +
        // Update Type FULL, Surface updates
        for (j = 0; j < dc->res_pool->pipe_count; j++) {
                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
                        if (update_type == UPDATE_TYPE_FAST)
                                continue;
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +                      ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
 +
 +                      if (dc->hwss.program_triplebuffer != NULL &&
 +                              !dc->debug.disable_tri_buf) {
 +                              /*turn off triple buffer for full update*/
 +                              dc->hwss.program_triplebuffer(
 +                                      dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
 +                      }
 +#endif
                        stream_status =
                                stream_get_status(context, pipe_ctx->stream);
  
                 */
                dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +              if (dc->hwss.set_flip_control_gsl)
 +                      for (i = 0; i < surface_count; i++) {
 +                              struct dc_plane_state *plane_state = srf_updates[i].surface;
 +
 +                              for (j = 0; j < dc->res_pool->pipe_count; j++) {
 +                                      struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
 +
 +                                      if (pipe_ctx->stream != stream)
 +                                              continue;
 +
 +                                      if (pipe_ctx->plane_state != plane_state)
 +                                              continue;
 +
 +                                      // GSL has to be used for flip immediate
 +                                      dc->hwss.set_flip_control_gsl(pipe_ctx,
 +                                                      plane_state->flip_immediate);
 +                              }
 +                      }
 +#endif
                /* Perform requested Updates */
                for (i = 0; i < surface_count; i++) {
                        struct dc_plane_state *plane_state = srf_updates[i].surface;
  
                                if (pipe_ctx->plane_state != plane_state)
                                        continue;
 -
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +                              /* program triple buffer after lock based on flip type */
 +                              if (dc->hwss.program_triplebuffer != NULL &&
 +                                      !dc->debug.disable_tri_buf) {
 +                                      /* only enable triple buffer for fast updates */
 +                                      dc->hwss.program_triplebuffer(
 +                                              dc, pipe_ctx, plane_state->triplebuffer_flips);
 +                              }
 +#endif
                                if (srf_updates[i].flip_addr)
                                        dc->hwss.update_plane_addr(dc, pipe_ctx);
                        }
@@@ -2097,8 -1859,6 +2099,8 @@@ void dc_commit_updates_for_stream(struc
                }
        }
  
 +      copy_stream_update_to_stream(dc, context, stream, stream_update);
 +
        commit_planes_for_stream(
                                dc,
                                srf_updates,
@@@ -2172,12 -1932,6 +2174,12 @@@ void dc_set_power_state
        enum dc_acpi_cm_power_state power_state)
  {
        struct kref refcount;
 +      struct display_mode_lib *dml = kzalloc(sizeof(struct display_mode_lib),
 +                                              GFP_KERNEL);
 +
 +      ASSERT(dml);
 +      if (!dml)
 +              return;
  
        switch (power_state) {
        case DC_ACPI_CM_POWER_STATE_D0:
  
                /* Preserve refcount */
                refcount = dc->current_state->refcount;
 +              /* Preserve display mode lib */
 +              memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
 +
                dc_resource_state_destruct(dc->current_state);
                memset(dc->current_state, 0,
                                sizeof(*dc->current_state));
  
                dc->current_state->refcount = refcount;
 +              dc->current_state->bw_ctx.dml = *dml;
  
                break;
        }
  
 +      kfree(dml);
  }
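
/* Editor's note: the temporary display_mode_lib above is heap-allocated,
 * presumably because the structure is too large to keep on the kernel
 * stack; it only needs to outlive the memset of current_state.
 */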
  
  void dc_resume(struct dc *dc)
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  #include "atom.h"
  #include "dm_helpers.h"
  #include "dpcd_defs.h"
  #include "dmcu.h"
  #include "hw/clk_mgr.h"
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +#include "resource.h"
 +#endif
 +#include "hw/clk_mgr.h"
  
  #define DC_LOGGER_INIT(logger)
  
@@@ -221,11 -219,8 +223,11 @@@ bool dc_link_detect_sink(struct dc_lin
                return true;
        }
  
 -      if (link->connector_signal == SIGNAL_TYPE_EDP)
 +      if (link->connector_signal == SIGNAL_TYPE_EDP) {
 +              /* in case it is not on */
 +              link->dc->hwss.edp_power_control(link, true);
                link->dc->hwss.edp_wait_for_hpd_ready(link, true);
 +      }
  
        /* todo: may need to lock gpio access */
        hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
@@@ -527,31 -522,11 +529,31 @@@ static void read_edp_current_link_setti
        union lane_count_set lane_count_set = { {0} };
        uint8_t link_bw_set;
        uint8_t link_rate_set;
 +      uint32_t read_dpcd_retry_cnt = 10;
 +      enum dc_status status = DC_ERROR_UNEXPECTED;
 +      int i;
  
        // Read DPCD 00101h to find out the number of lanes currently set
 -      core_link_read_dpcd(link, DP_LANE_COUNT_SET,
 -                      &lane_count_set.raw, sizeof(lane_count_set));
 -      link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET;
 +      for (i = 0; i < read_dpcd_retry_cnt; i++) {
 +              status = core_link_read_dpcd(
 +                              link,
 +                              DP_LANE_COUNT_SET,
 +                              &lane_count_set.raw,
 +                              sizeof(lane_count_set));
 +              /* First DPCD read after VDD ON can fail if the particular board
 +               * does not have HPD pin wired correctly. So if DPCD read fails,
 +               * which should never happen, retry a few times. Target worst
 +               * case scenario of 80 ms.
 +               */
 +              if (status == DC_OK) {
 +                      link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET;
 +                      break;
 +              }
 +
 +              udelay(8000);
 +      }
 +
 +      ASSERT(status == DC_OK);
  
        // Read DPCD 00100h to find if standard link rates are set
        core_link_read_dpcd(link, DP_LINK_BW_SET,
@@@ -705,11 -680,6 +707,11 @@@ bool dc_link_detect(struct dc_link *lin
        if (dc_is_virtual_signal(link->connector_signal))
                return false;
  
 +      if ((link->connector_signal == SIGNAL_TYPE_LVDS ||
 +                      link->connector_signal == SIGNAL_TYPE_EDP) &&
 +                      link->local_sink)
 +              return true;
 +
        if (false == dc_link_detect_sink(link, &new_connection_type)) {
                BREAK_TO_DEBUGGER();
                return false;
                 * up to date, especially if link was powered on by GOP.
                 */
                read_edp_current_link_settings_on_detect(link);
 -              if (link->local_sink)
 -                      return true;
        }
  
 -      if (link->connector_signal == SIGNAL_TYPE_LVDS &&
 -                      link->local_sink)
 -              return true;
 -
        prev_sink = link->local_sink;
        if (prev_sink != NULL) {
                dc_sink_retain(prev_sink);
  
                link->type = dc_connection_none;
                sink_caps.signal = SIGNAL_TYPE_NONE;
 +              /* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk
 +               *  is not cleared. If we emulate a DP signal on this connection, it thinks
 +               *  the dongle is still there and limits the number of modes we can emulate.
 +               *  Clear dongle_max_pix_clk on disconnect to fix this
 +               */
 +              link->dongle_max_pix_clk = 0;
        }
  
        LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p dpcd same=%d edid same=%d\n",
@@@ -1190,7 -1160,7 +1192,7 @@@ static bool construct
        link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
  
        if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
 -              dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
 +              dm_output_to_console("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
                         __func__, init_params->connector_index,
                         link->link_id.type, OBJECT_TYPE_CONNECTOR);
                goto create_fail;
@@@ -1508,10 -1478,6 +1510,10 @@@ static enum dc_status enable_link_dp
        if (link_settings.link_rate == LINK_RATE_LOW)
                        skip_video_pattern = false;
  
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      dp_set_fec_ready(link, true);
 +#endif
 +
        if (perform_link_training_with_retries(
                        link,
                        &link_settings,
        else
                status = DC_FAIL_DP_LINK_TRAINING;
  
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      dp_set_fec_enable(link, true);
 +#endif
        return status;
  }
  
@@@ -2148,14 -2111,6 +2150,14 @@@ static void disable_link(struct dc_lin
                        dp_disable_link_phy(link, signal);
                else
                        dp_disable_link_phy_mst(link, signal);
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +
 +              if (dc_is_dp_sst_signal(signal) ||
 +                              link->mst_stream_alloc_table.stream_count == 0) {
 +                      dp_set_fec_enable(link, false);
 +                      dp_set_fec_ready(link, false);
 +              }
 +#endif
        } else
                link->link_enc->funcs->disable_output(link->link_enc, signal);
  
@@@ -2752,30 -2707,13 +2754,30 @@@ void core_link_enable_stream
                if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                        allocate_mst_payload(pipe_ctx);
  
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +              if (pipe_ctx->stream->timing.flags.DSC &&
 +                              (dc_is_dp_signal(pipe_ctx->stream->signal) ||
 +                              dc_is_virtual_signal(pipe_ctx->stream->signal))) {
 +                      dp_set_dsc_enable(pipe_ctx, true);
 +                      pipe_ctx->stream_res.tg->funcs->wait_for_state(
 +                                      pipe_ctx->stream_res.tg,
 +                                      CRTC_STATE_VBLANK);
 +              }
 +#endif
                core_dc->hwss.unblank_stream(pipe_ctx,
                        &pipe_ctx->stream->link->cur_link_settings);
  
                if (dc_is_dp_signal(pipe_ctx->stream->signal))
                        enable_stream_features(pipe_ctx);
        }
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      else { // if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment))
 +              if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
 +                              dc_is_virtual_signal(pipe_ctx->stream->signal))
 +                      dp_set_dsc_enable(pipe_ctx, true);
  
 +      }
 +#endif
  }
  
  void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
        core_dc->hwss.disable_stream(pipe_ctx, option);
  
        disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      if (pipe_ctx->stream->timing.flags.DSC &&
 +                      dc_is_dp_signal(pipe_ctx->stream->signal)) {
 +              dp_set_dsc_enable(pipe_ctx, false);
 +      }
 +#endif
  }
  
  void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
@@@ -2889,14 -2821,6 +2891,14 @@@ uint32_t dc_bandwidth_in_kbps_from_timi
        uint32_t bits_per_channel = 0;
        uint32_t kbps;
  
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      if (timing->flags.DSC) {
 +              kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel);
 +              kbps = kbps / 160 + ((kbps % 160) ? 1 : 0);
 +              return kbps;
 +      }
 +#endif
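/* Editor's note on the /160 above: dsc_cfg.bits_per_pixel is stored in
 * 1/16-bpp fixed-point units (so 128 means 8 bpp) and pix_clk_100hz is in
 * 100 Hz units, hence
 *   kbps = clk_100hz * 100 * (bpp_x16 / 16) / 1000 = clk_100hz * bpp_x16 / 160,
 * with the second line rounding up. Worked check, 4k60 at 8 bpp:
 *   5940000 * 128 / 160 = 4752000 kbps, i.e. 594 MHz * 8 bpp = 4.752 Gbps.
 */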
 +
        switch (timing->display_color_depth) {
        case COLOR_DEPTH_666:
                bits_per_channel = 6;
@@@ -3048,33 -2972,6 +3050,33 @@@ uint32_t dc_link_bandwidth_kbps
        link_bw_kbps *= 8;   /* 8 bits per byte*/
        link_bw_kbps *= link_setting->lane_count;
  
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      if (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
 +              /* Account for FEC overhead.
 +               * We have to do it based on caps,
 +               * and not based on FEC being set ready,
 +               * because FEC is set ready too late in
 +               * the process to correctly be picked up
 +               * by mode enumeration.
 +               *
 +               * There's enough zeros at the end of 'kbps'
 +               * that make the below operation 100% precise
 +               * for our purposes.
 +               * 'long long' makes it work even for HDMI 2.1
 +               * max bandwidth (and much, much bigger bandwidths
 +               * than that, actually).
 +               *
 +               * NOTE: Reducing link BW by 3% may not be precise
 +               * because it may be a stream BW that increases by 3%, and so
 +               * 1/1.03 = 0.970873 factor should have been used instead,
 +               * but the difference is minimal and is in a safe direction,
 +               * which all works well around potential ambiguity of DP 1.4a spec.
 +               */
 +              long long fec_link_bw_kbps = link_bw_kbps * 970LL;
 +              link_bw_kbps = (uint32_t)(fec_link_bw_kbps / 1000LL);
 +      }
 +#endif
 +
        return link_bw_kbps;
  
  }
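
/* Editor's note: a worked example of the FEC reduction above. HBR2 x4
 * after 8b/10b coding gives link_bw_kbps = 17,280,000; for an FEC-capable
 * sink this becomes 17,280,000 * 970 / 1000 = 16,761,600 kbps, i.e. the
 * ~3% FEC overhead the comment describes.
 */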
@@@ -3087,3 -2984,4 +3089,3 @@@ const struct dc_link_settings *dc_link_
                return &link->preferred_link_setting;
        return &link->verified_link_cap;
  }
 -
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  #include "dm_helpers.h"
  #include "gpio_service_interface.h"
@@@ -91,8 -93,6 +93,8 @@@ union hdmi_scdc_status_flags_data 
                uint8_t CH2_LOCKED:1;
                uint8_t RESERVED:4;
                uint8_t RESERVED2:8;
 +              uint8_t RESERVED3:8;
 +
        } fields;
  };
  
@@@ -109,10 -109,14 +111,10 @@@ union hdmi_scdc_ced_data 
                uint8_t CH2_7HIGH:7;
                uint8_t CH2_VALID:1;
                uint8_t CHECKSUM:8;
 -      } fields;
 -};
 -
 -union hdmi_scdc_test_config_Data {
 -      uint8_t byte;
 -      struct {
 -              uint8_t TEST_READ_REQUEST_DELAY:7;
 -              uint8_t TEST_READ_REQUEST: 1;
 +              uint8_t RESERVED:8;
 +              uint8_t RESERVED2:8;
 +              uint8_t RESERVED3:8;
 +              uint8_t RESERVED4:4;
        } fields;
  };
  
@@@ -22,6 -22,9 +22,9 @@@
   * Authors: AMD
   *
   */
+ #include <linux/slab.h>
  #include "dm_services.h"
  
  #include "resource.h"
@@@ -46,9 -49,6 +49,9 @@@
  #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
  #include "dcn10/dcn10_resource.h"
  #endif
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +#include "dcn20/dcn20_resource.h"
 +#endif
  #include "dce120/dce120_resource.h"
  
  #define DC_LOGGER_INIT(logger)
@@@ -100,12 -100,6 +103,12 @@@ enum dce_version resource_parse_asic_id
                        dc_version = DCN_VERSION_1_01;
                break;
  #endif
 +
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +      case FAMILY_NV:
 +              dc_version = DCN_VERSION_2_0;
 +              break;
 +#endif
        default:
                dc_version = DCE_VERSION_UNKNOWN;
                break;
@@@ -160,12 -154,6 +163,12 @@@ struct resource_pool *dc_create_resourc
  #endif
  
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +      case DCN_VERSION_2_0:
 +              res_pool = dcn20_create_resource_pool(init_data, dc);
 +              break;
 +#endif
 +
        default:
                break;
        }
@@@ -23,6 -23,9 +23,9 @@@
   *
   */
  
+ #include <linux/delay.h>
+ #include <linux/slab.h>
  #include "dm_services.h"
  #include "dc.h"
  #include "core_types.h"
@@@ -105,17 -108,6 +108,17 @@@ static void construct(struct dc_stream_
        /* EDID CAP translation for HDMI 2.0 */
        stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble;
  
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      memset(&stream->timing.dsc_cfg, 0, sizeof(stream->timing.dsc_cfg));
 +      stream->timing.dsc_cfg.num_slices_h = 0;
 +      stream->timing.dsc_cfg.num_slices_v = 0;
 +      stream->timing.dsc_cfg.bits_per_pixel = 128;
 +      stream->timing.dsc_cfg.block_pred_enable = 1;
 +      stream->timing.dsc_cfg.linebuf_depth = 9;
 +      stream->timing.dsc_cfg.version_minor = 2;
 +      stream->timing.dsc_cfg.ycbcr422_simple = 0;
 +#endif
 +
        update_stream_signal(stream, dc_sink_data);
  
        stream->out_transfer_func = dc_create_transfer_func();
@@@ -366,121 -358,6 +369,121 @@@ bool dc_stream_set_cursor_position
        return true;
  }
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +bool dc_stream_add_writeback(struct dc *dc,
 +              struct dc_stream_state *stream,
 +              struct dc_writeback_info *wb_info)
 +{
 +      bool isDrc = false;
 +      int i = 0;
 +      struct dwbc *dwb;
 +
 +      if (stream == NULL) {
 +              dm_error("DC: dc_stream is NULL!\n");
 +              return false;
 +      }
 +
 +      if (wb_info == NULL) {
 +              dm_error("DC: dc_writeback_info is NULL!\n");
 +              return false;
 +      }
 +
 +      if (wb_info->dwb_pipe_inst >= MAX_DWB_PIPES) {
 +              dm_error("DC: writeback pipe is invalid!\n");
 +              return false;
 +      }
 +
 +      wb_info->dwb_params.out_transfer_func = stream->out_transfer_func;
 +
 +      dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
 +      dwb->dwb_is_drc = false;
 +
 +      /* recalculate and apply DML parameters */
 +
 +      for (i = 0; i < stream->num_wb_info; i++) {
 +              /* dynamic update */
 +              if (stream->writeback_info[i].wb_enabled &&
 +                      stream->writeback_info[i].dwb_pipe_inst == wb_info->dwb_pipe_inst) {
 +                      stream->writeback_info[i] = *wb_info;
 +                      isDrc = true;
 +              }
 +      }
 +
 +      if (!isDrc) {
 +              stream->writeback_info[stream->num_wb_info++] = *wb_info;
 +      }
 +
 +      if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
 +              dm_error("DC: update_bandwidth failed!\n");
 +              return false;
 +      }
 +
 +      /* enable writeback */
 +      if (dc->hwss.enable_writeback) {
 +              struct dc_stream_status *stream_status = dc_stream_get_status(stream);
 +              struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
 +
 +              if (dwb->funcs->is_enabled(dwb)) {
 +                      /* writeback pipe already enabled, only need to update */
 +                      dc->hwss.update_writeback(dc, stream_status, wb_info);
 +              } else {
 +                      /* Enable writeback pipe from scratch */
 +                      dc->hwss.enable_writeback(dc, stream_status, wb_info);
 +              }
 +      }
 +
 +      return true;
 +}
 +
 +bool dc_stream_remove_writeback(struct dc *dc,
 +              struct dc_stream_state *stream,
 +              uint32_t dwb_pipe_inst)
 +{
 +      int i = 0, j = 0;
 +      if (stream == NULL) {
 +              dm_error("DC: dc_stream is NULL!\n");
 +              return false;
 +      }
 +
 +      if (dwb_pipe_inst >= MAX_DWB_PIPES) {
 +              dm_error("DC: writeback pipe is invalid!\n");
 +              return false;
 +      }
 +
 +      for (i = 0; i < stream->num_wb_info; i++) {
 +              /* dynamic update */
 +              if (stream->writeback_info[i].wb_enabled &&
 +                      stream->writeback_info[i].dwb_pipe_inst == dwb_pipe_inst) {
 +                      stream->writeback_info[i].wb_enabled = false;
 +              }
 +      }
 +
 +      /* remove writeback info for disabled writeback pipes from stream */
 +      for (i = 0, j = 0; i < stream->num_wb_info; i++) {
 +              if (stream->writeback_info[i].wb_enabled) {
 +                      if (i != j)
 +                              /* trim the array */
 +                              stream->writeback_info[j] = stream->writeback_info[i];
 +                      j++;
 +              }
 +      }
 +      stream->num_wb_info = j;
 +
 +      /* recalculate and apply DML parameters */
 +      if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
 +              dm_error("DC: update_bandwidth failed!\n");
 +              return false;
 +      }
 +
 +      /* disable writeback */
 +      if (dc->hwss.disable_writeback)
 +              dc->hwss.disable_writeback(dc, dwb_pipe_inst);
 +
 +      return true;
 +}
 +#endif
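
/* Editor's note: a hedged caller sketch for the writeback API above; the
 * pipe instance and flags are illustrative, not prescriptive.
 */
static bool example_enable_writeback(struct dc *dc,
		struct dc_stream_state *stream)
{
	struct dc_writeback_info wb_info = { 0 };

	wb_info.wb_enabled = true;
	wb_info.dwb_pipe_inst = 0;	/* must be < MAX_DWB_PIPES */

	return dc_stream_add_writeback(dc, stream, &wb_info);
}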
 +
  uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
  {
        uint8_t i;
@@@ -565,77 -442,6 +568,77 @@@ bool dc_stream_get_scanoutpos(const str
  
        return ret;
  }
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream)
 +{
 +      bool status = true;
 +      struct pipe_ctx *pipe = NULL;
 +      int i;
 +
 +      if (!dc->hwss.dmdata_status_done)
 +              return false;
 +
 +      for (i = 0; i < MAX_PIPES; i++) {
 +              pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 +              if (pipe->stream == stream)
 +                      break;
 +      }
 +      /* Stream not found; by default we'll assume HUBP fetched the dmdata */
 +      if (i == MAX_PIPES)
 +              return true;
 +
 +      status = dc->hwss.dmdata_status_done(pipe);
 +      return status;
 +}
 +
 +bool dc_stream_set_dynamic_metadata(struct dc *dc,
 +              struct dc_stream_state *stream,
 +              struct dc_dmdata_attributes *attr)
 +{
 +      struct pipe_ctx *pipe_ctx = NULL;
 +      struct hubp *hubp;
 +      int i;
 +
 +      for (i = 0; i < MAX_PIPES; i++) {
 +              pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
 +              if (pipe_ctx->stream == stream)
 +                      break;
 +      }
 +
 +      if (i == MAX_PIPES)
 +              return false;
 +
 +      hubp = pipe_ctx->plane_res.hubp;
 +      if (hubp == NULL)
 +              return false;
 +
 +      pipe_ctx->stream->dmdata_address = attr->address;
 +
 +      if (pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) {
 +              if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
 +                      /* if using dynamic meta, don't set up generic infopackets */
 +                      pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
 +                      pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata(
 +                                      pipe_ctx->stream_res.stream_enc,
 +                                      true, pipe_ctx->plane_res.hubp->inst,
 +                                      dc_is_dp_signal(pipe_ctx->stream->signal) ?
 +                                                      dmdata_dp : dmdata_hdmi);
 +              } else
 +                      pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata(
 +                                      pipe_ctx->stream_res.stream_enc,
 +                                      false, pipe_ctx->plane_res.hubp->inst,
 +                                      dc_is_dp_signal(pipe_ctx->stream->signal) ?
 +                                                      dmdata_dp : dmdata_hdmi);
 +      }
 +
 +      if (hubp->funcs->dmdata_set_attributes != NULL &&
 +                      pipe_ctx->stream->dmdata_address.quad_part != 0) {
 +              hubp->funcs->dmdata_set_attributes(hubp, attr);
 +      }
 +
 +      return true;
 +}
 +#endif
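
/* Editor's note: hedged usage sketch for dc_stream_set_dynamic_metadata()
 * above; a zeroed address falls back to the generic infopacket path.
 */
static bool example_set_dmdata(struct dc *dc, struct dc_stream_state *stream,
		uint64_t gpu_addr)
{
	struct dc_dmdata_attributes attr = { 0 };

	attr.address.quad_part = gpu_addr;	/* 0 disables dynamic metadata */

	return dc_stream_set_dynamic_metadata(dc, stream, &attr);
}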
  
  void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
  {
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/mm.h>
  /* DC interface (public) */
  #include "dm_services.h"
  #include "dc.h"
@@@ -48,25 -50,6 +50,25 @@@ static void construct(struct dc_contex
                plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
                plane_state->in_transfer_func->ctx = ctx;
        }
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +      plane_state->in_shaper_func = dc_create_transfer_func();
 +      if (plane_state->in_shaper_func != NULL) {
 +              plane_state->in_shaper_func->type = TF_TYPE_BYPASS;
 +              plane_state->in_shaper_func->ctx = ctx;
 +      }
 +
 +      plane_state->lut3d_func = dc_create_3dlut_func();
 +      if (plane_state->lut3d_func != NULL) {
 +              plane_state->lut3d_func->ctx = ctx;
 +              plane_state->lut3d_func->initialized = false;
 +      }
 +      plane_state->blend_tf = dc_create_transfer_func();
 +      if (plane_state->blend_tf != NULL) {
 +              plane_state->blend_tf->type = TF_TYPE_BYPASS;
 +              plane_state->blend_tf->ctx = ctx;
 +      }
 +
 +#endif
  }
  
  static void destruct(struct dc_plane_state *plane_state)
                                plane_state->in_transfer_func);
                plane_state->in_transfer_func = NULL;
        }
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +      if (plane_state->in_shaper_func != NULL) {
 +              dc_transfer_func_release(
 +                              plane_state->in_shaper_func);
 +              plane_state->in_shaper_func = NULL;
 +      }
 +      if (plane_state->lut3d_func != NULL) {
 +              dc_3dlut_func_release(
 +                              plane_state->lut3d_func);
 +              plane_state->lut3d_func = NULL;
 +      }
 +      if (plane_state->blend_tf != NULL) {
 +              dc_transfer_func_release(
 +                              plane_state->blend_tf);
 +              plane_state->blend_tf = NULL;
 +      }
 +
 +#endif
  }
  
  /*******************************************************************************
@@@ -261,40 -226,4 +263,40 @@@ alloc_fail
        return NULL;
  }
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +static void dc_3dlut_func_free(struct kref *kref)
 +{
 +      struct dc_3dlut *lut = container_of(kref, struct dc_3dlut, refcount);
 +
 +      kvfree(lut);
 +}
 +
 +struct dc_3dlut *dc_create_3dlut_func(void)
 +{
 +      struct dc_3dlut *lut = kvzalloc(sizeof(*lut), GFP_KERNEL);
 +
 +      if (lut == NULL)
 +              goto alloc_fail;
 +
 +      kref_init(&lut->refcount);
 +      lut->initialized = false;
 +
 +      return lut;
 +
 +alloc_fail:
 +      return NULL;
 +
 +}
 +
 +void dc_3dlut_func_release(struct dc_3dlut *lut)
 +{
 +      kref_put(&lut->refcount, dc_3dlut_func_free);
 +}
 +
 +void dc_3dlut_func_retain(struct dc_3dlut *lut)
 +{
 +      kref_get(&lut->refcount);
 +}
 +#endif
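
/* Editor's note: the kref plumbing above follows the usual
 * create/retain/release lifecycle; a hedged usage sketch:
 */
static void example_3dlut_lifecycle(struct dc_plane_state *plane_state)
{
	struct dc_3dlut *lut = dc_create_3dlut_func();	/* refcount = 1 */

	if (!lut)
		return;

	dc_3dlut_func_retain(lut);		/* plane takes a reference */
	plane_state->lut3d_func = lut;

	dc_3dlut_func_release(lut);		/* drop the creator's reference */
}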
 +
  
@@@ -22,7 -22,9 +22,9 @@@
   * Authors: AMD
   *
   */
- #include "../dc.h"
+ #include <linux/slab.h>
  #include "reg_helper.h"
  #include "dce_audio.h"
  #include "dce/dce_11_0_d.h"
@@@ -841,6 -843,8 +843,6 @@@ void dce_aud_wall_dto_setup
                REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
                                DCCG_AUDIO_DTO_SEL, 1);
  
 -              REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
 -                      DCCG_AUDIO_DTO_SEL, 1);
                        /* DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1)
                         * Select 512fs for DP TODO: web register definition
                         * does not match register header file
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  
  
@@@ -53,8 -55,6 +55,8 @@@
  #define CALC_PLL_CLK_SRC_ERR_TOLERANCE 1
  #define MAX_PLL_CALC_ERROR 0xFFFFFFFF
  
 +#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
 +
  static const struct spread_spectrum_data *get_ss_data_entry(
                struct dce110_clk_src *clk_src,
                enum signal_type signal,
@@@ -1002,67 -1002,6 +1004,67 @@@ static bool get_pixel_clk_frequency_100
        return false;
  }
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +
 +/* This table is used to find *1.001 and /1.001 pixel rates from a non-precise pixel rate */
 +struct pixel_rate_range_table_entry {
 +      unsigned int range_min_khz;
 +      unsigned int range_max_khz;
 +      unsigned int target_pixel_rate_khz;
 +      unsigned short mult_factor;
 +      unsigned short div_factor;
 +};
 +
 +static const struct pixel_rate_range_table_entry video_optimized_pixel_rates[] = {
 +      // /1.001 rates
 +      {25170, 25180, 25200, 1000, 1001},      //25.2MHz   ->   25.17
 +      {59340, 59350, 59400, 1000, 1001},      //59.4Mhz   ->   59.340
 +      {74170, 74180, 74250, 1000, 1001},      //74.25Mhz  ->   74.1758
 +      {125870, 125880, 126000, 1000, 1001},   //126Mhz    ->  125.87
 +      {148350, 148360, 148500, 1000, 1001},   //148.5Mhz  ->  148.3516
 +      {167830, 167840, 168000, 1000, 1001},   //168Mhz    ->  167.83
 +      {222520, 222530, 222750, 1000, 1001},   //222.75Mhz ->  222.527
 +      {257140, 257150, 257400, 1000, 1001},   //257.4Mhz  ->  257.1429
 +      {296700, 296710, 297000, 1000, 1001},   //297Mhz    ->  296.7033
 +      {342850, 342860, 343200, 1000, 1001},   //343.2Mhz  ->  342.857
 +      {395600, 395610, 396000, 1000, 1001},   //396Mhz    ->  395.6
 +      {409090, 409100, 409500, 1000, 1001},   //409.5Mhz  ->  409.091
 +      {445050, 445060, 445500, 1000, 1001},   //445.5Mhz  ->  445.055
 +      {467530, 467540, 468000, 1000, 1001},   //468Mhz    ->  467.5325
 +      {519230, 519240, 519750, 1000, 1001},   //519.75Mhz ->  519.231
 +      {525970, 525980, 526500, 1000, 1001},   //526.5Mhz  ->  525.974
 +      {545450, 545460, 546000, 1000, 1001},   //546Mhz    ->  545.455
 +      {593400, 593410, 594000, 1000, 1001},   //594Mhz    ->  593.4066
 +      {623370, 623380, 624000, 1000, 1001},   //624Mhz    ->  623.377
 +      {692300, 692310, 693000, 1000, 1001},   //693Mhz    ->  692.308
 +      {701290, 701300, 702000, 1000, 1001},   //702Mhz    ->  701.2987
 +      {791200, 791210, 792000, 1000, 1001},   //792Mhz    ->  791.209
 +      {890100, 890110, 891000, 1000, 1001},   //891Mhz    ->  890.1099
 +      {1186810, 1186820, 1188000, 1000, 1001},//1188Mhz   -> 1186.8131
 +
 +      // *1.001 rates
 +      {27020, 27030, 27000, 1001, 1000}, //27Mhz
 +      {54050, 54060, 54000, 1001, 1000}, //54Mhz
 +      {108100, 108110, 108000, 1001, 1000},//108Mhz
 +};
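
/* Editor's note: a hedged sketch of how such a range table would typically
 * be consumed (hypothetical helper, not part of this patch):
 */
static const struct pixel_rate_range_table_entry *
example_look_up_video_rate(unsigned int pixel_rate_khz)
{
	int i;

	for (i = 0; i < NUM_ELEMENTS(video_optimized_pixel_rates); i++) {
		const struct pixel_rate_range_table_entry *e =
				&video_optimized_pixel_rates[i];

		if (e->range_min_khz <= pixel_rate_khz &&
		    pixel_rate_khz <= e->range_max_khz)
			return e;	/* e.g. 59345 -> 59400 * 1000/1001 */
	}

	return NULL;	/* not near a *1.001 or /1.001 video rate */
}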
 +
 +static bool dcn20_program_pix_clk(
 +              struct clock_source *clock_source,
 +              struct pixel_clk_params *pix_clk_params,
 +              struct pll_settings *pll_settings)
 +{
 +      dce112_program_pix_clk(clock_source, pix_clk_params, pll_settings);
 +
 +      return true;
 +}
 +
 +static const struct clock_source_funcs dcn20_clk_src_funcs = {
 +      .cs_power_down = dce110_clock_source_power_down,
 +      .program_pix_clk = dcn20_program_pix_clk,
 +      .get_pix_clk_dividers = dce112_get_pix_clk_dividers
 +};
 +#endif
 +
  /*****************************************/
  /* Constructor                           */
  /*****************************************/
@@@ -1439,20 -1378,3 +1441,20 @@@ bool dce112_clk_src_construct
        return true;
  }
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +bool dcn20_clk_src_construct(
 +      struct dce110_clk_src *clk_src,
 +      struct dc_context *ctx,
 +      struct dc_bios *bios,
 +      enum clock_source_id id,
 +      const struct dce110_clk_src_regs *regs,
 +      const struct dce110_clk_src_shift *cs_shift,
 +      const struct dce110_clk_src_mask *cs_mask)
 +{
 +      bool ret = dce112_clk_src_construct(clk_src, ctx, bios, id, regs, cs_shift, cs_mask);
 +
 +      clk_src->base.funcs = &dcn20_clk_src_funcs;
 +
 +      return ret;
 +}
 +#endif
@@@ -23,6 -23,9 +23,9 @@@
   *
   */
  
+ #include <linux/delay.h>
+ #include <linux/slab.h>
  #include "core_types.h"
  #include "link_encoder.h"
  #include "dce_dmcu.h"
@@@ -726,56 -729,6 +729,56 @@@ static bool dcn10_is_dmcu_initialized(s
  
  #endif //(CONFIG_DRM_AMD_DC_DCN1_0)
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +
 +static bool dcn20_lock_phy(struct dmcu *dmcu)
 +{
 +      struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
 +
 +      /* If microcontroller is not running, do nothing */
 +      if (dmcu->dmcu_state != DMCU_RUNNING)
 +              return false;
 +
 +      /* waitDMCUReadyForCmd */
 +      REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
 +
 +      /* setDMCUParam_Cmd */
 +      REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_SYNC_PHY_LOCK);
 +
 +      /* notifyDMCUMsg */
 +      REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
 +
 +      /* waitDMCUReadyForCmd */
 +      REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
 +
 +      return true;
 +}
 +
 +static bool dcn20_unlock_phy(struct dmcu *dmcu)
 +{
 +      struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
 +
 +      /* If microcontroller is not running, do nothing */
 +      if (dmcu->dmcu_state != DMCU_RUNNING)
 +              return false;
 +
 +      /* waitDMCUReadyForCmd */
 +      REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
 +
 +      /* setDMCUParam_Cmd */
 +      REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_SYNC_PHY_UNLOCK);
 +
 +      /* notifyDMCUMsg */
 +      REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
 +
 +      /* waitDMCUReadyForCmd */
 +      REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
 +
 +      return true;
 +}
 +
 +#endif //(CONFIG_DRM_AMD_DC_DCN2_0)
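
/* Editor's note: dcn20_lock_phy() and dcn20_unlock_phy() differ only in the
 * command byte; a hedged sketch of the shared mailbox handshake they both
 * implement (wait-ready, post command, ring doorbell, wait again):
 */
static bool dcn20_phy_sync_cmd_sketch(struct dmcu *dmcu, uint32_t cmd)
{
	struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);

	if (dmcu->dmcu_state != DMCU_RUNNING)
		return false;

	/* wait for the microcontroller to be ready for a command */
	REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
	/* post MCP_SYNC_PHY_LOCK or MCP_SYNC_PHY_UNLOCK */
	REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, cmd);
	/* notify the DMCU ... */
	REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
	/* ... and wait for it to consume the command */
	REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);

	return true;
}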
 +
  static const struct dmcu_funcs dce_funcs = {
        .dmcu_init = dce_dmcu_init,
        .load_iram = dce_dmcu_load_iram,
@@@ -800,21 -753,6 +803,21 @@@ static const struct dmcu_funcs dcn10_fu
  };
  #endif
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +static const struct dmcu_funcs dcn20_funcs = {
 +      .dmcu_init = dcn10_dmcu_init,
 +      .load_iram = dcn10_dmcu_load_iram,
 +      .set_psr_enable = dcn10_dmcu_set_psr_enable,
 +      .setup_psr = dcn10_dmcu_setup_psr,
 +      .get_psr_state = dcn10_get_dmcu_psr_state,
 +      .set_psr_wait_loop = dcn10_psr_wait_loop,
 +      .get_psr_wait_loop = dcn10_get_psr_wait_loop,
 +      .is_dmcu_initialized = dcn10_is_dmcu_initialized,
 +      .lock_phy = dcn20_lock_phy,
 +      .unlock_phy = dcn20_unlock_phy
 +};
 +#endif
 +
  static void dce_dmcu_construct(
        struct dce_dmcu *dmcu_dce,
        struct dc_context *ctx,
@@@ -877,29 -815,6 +880,29 @@@ struct dmcu *dcn10_dmcu_create
  }
  #endif
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +struct dmcu *dcn20_dmcu_create(
 +      struct dc_context *ctx,
 +      const struct dce_dmcu_registers *regs,
 +      const struct dce_dmcu_shift *dmcu_shift,
 +      const struct dce_dmcu_mask *dmcu_mask)
 +{
 +      struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
 +
 +      if (dmcu_dce == NULL) {
 +              BREAK_TO_DEBUGGER();
 +              return NULL;
 +      }
 +
 +      dce_dmcu_construct(
 +              dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask);
 +
 +      dmcu_dce->base.funcs = &dcn20_funcs;
 +
 +      return &dmcu_dce->base;
 +}
 +#endif
 +
  void dce_dmcu_destroy(struct dmcu **dmcu)
  {
        struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(*dmcu);
@@@ -22,6 -22,9 +22,9 @@@
   * Authors: AMD
   *
   */
+ #include <linux/delay.h>
  #include "dce_i2c.h"
  #include "dce_i2c_hw.h"
  #include "reg_helper.h"
@@@ -149,36 -152,6 +152,36 @@@ static void process_channel_reply
        }
  }
  
 +static bool is_engine_available(struct dce_i2c_hw *dce_i2c_hw)
 +{
 +      unsigned int arbitrate;
 +      unsigned int i2c_hw_status;
 +
 +      REG_GET(HW_STATUS, DC_I2C_DDC1_HW_STATUS, &i2c_hw_status);
 +      if (i2c_hw_status == DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW)
 +              return false;
 +
 +      REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
 +      if (arbitrate == DC_I2C_REG_RW_CNTL_STATUS_DMCU_ONLY)
 +              return false;
 +
 +      return true;
 +}
 +
 +static bool is_hw_busy(struct dce_i2c_hw *dce_i2c_hw)
 +{
 +      uint32_t i2c_sw_status = 0;
 +
 +      REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
 +      if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE)
 +              return false;
 +
 +      if (is_engine_available(dce_i2c_hw))
 +              return false;
 +
 +      return true;
 +}
 +
  static bool process_transaction(
        struct dce_i2c_hw *dce_i2c_hw,
        struct i2c_request_transaction_data *request)
        bool last_transaction = false;
        uint32_t value = 0;
  
 +      if (is_hw_busy(dce_i2c_hw)) {
 +              request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY;
 +              return false;
 +      }
 +
        last_transaction = ((dce_i2c_hw->transaction_count == 3) ||
                        (request->action == DCE_I2C_TRANSACTION_ACTION_I2C_WRITE) ||
                        (request->action & DCE_I2C_TRANSACTION_ACTION_I2C_READ));
@@@ -303,12 -271,6 +306,12 @@@ static bool setup_engine
        struct dce_i2c_hw *dce_i2c_hw)
  {
        uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +      uint32_t  reset_length = 0;
 +#endif
        /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/
        REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
  
                REG_UPDATE_N(SETUP, 2,
                             FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,
                             FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +      } else {
 +              reset_length = dce_i2c_hw->send_reset_length;
 +              REG_UPDATE_N(SETUP, 3,
 +                           FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,
 +                           FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_SEND_RESET_LENGTH), reset_length,
 +                           FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
 +#endif
        }
        /* Program HW priority
         * set to High - interrupt software I2C at any time
         * Enable restart of SW I2C that was interrupted by HW
         * disable queuing of software while I2C is in use by HW
         */
 -      REG_UPDATE_2(DC_I2C_ARBITRATION,
 -                   DC_I2C_NO_QUEUED_SW_GO, 0,
 -                   DC_I2C_SW_PRIORITY, DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL);
 +      REG_UPDATE(DC_I2C_ARBITRATION,
 +                      DC_I2C_NO_QUEUED_SW_GO, 0);
  
        return true;
  }
  
 -static bool is_hw_busy(struct dce_i2c_hw *dce_i2c_hw)
 -{
 -      uint32_t i2c_sw_status = 0;
 -
 -      REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
 -      if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE)
 -              return false;
 -
 -      reset_hw_engine(dce_i2c_hw);
 -
 -      REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
 -      return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE;
 -}
 -
  static void release_engine(
        struct dce_i2c_hw *dce_i2c_hw)
  {
  
  }
  
 -static bool is_engine_available(struct dce_i2c_hw *dce_i2c_hw)
 -{
 -      unsigned int arbitrate;
 -
 -      REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
 -      if (arbitrate == DC_I2C_REG_RW_CNTL_STATUS_DMCU_ONLY)
 -              return false;
 -      return true;
 -}
 -
  struct dce_i2c_hw *acquire_i2c_hw_engine(
        struct resource_pool *pool,
        struct ddc *ddc)
@@@ -480,7 -459,6 +483,7 @@@ static void submit_channel_request_hw
                request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY;
                return;
        }
 +      reset_hw_engine(dce_i2c_hw);
  
        execute_transaction(dce_i2c_hw);
  
@@@ -712,23 -690,3 +715,23 @@@ void dcn1_i2c_hw_construct
        dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCN;
  }
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +void dcn2_i2c_hw_construct(
 +      struct dce_i2c_hw *dce_i2c_hw,
 +      struct dc_context *ctx,
 +      uint32_t engine_id,
 +      const struct dce_i2c_registers *regs,
 +      const struct dce_i2c_shift *shifts,
 +      const struct dce_i2c_mask *masks)
 +{
 +      dcn1_i2c_hw_construct(dce_i2c_hw,
 +                      ctx,
 +                      engine_id,
 +                      regs,
 +                      shifts,
 +                      masks);
 +      dce_i2c_hw->send_reset_length = I2C_SEND_RESET_LENGTH_9;
 +      if (ctx->dc->debug.scl_reset_length10)
 +              dce_i2c_hw->send_reset_length = I2C_SEND_RESET_LENGTH_10;
 +}
 +#endif
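For reference, the expected call pattern mirrors the dcn20_i2c_hw_create() helper added later in this merge: allocate the engine, then hand in the per-instance register tables (sketch; error handling elided):

	struct dce_i2c_hw *i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);

	if (i2c)
		dcn2_i2c_hw_construct(i2c, ctx, inst,
				&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);

With scl_reset_length10 set in dc->debug (as the DCN2 driver defaults do), the constructed engine uses I2C_SEND_RESET_LENGTH_10 instead of the default of 9.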
@@@ -22,6 -22,9 +22,9 @@@
   * Authors: AMD
   *
   */
+ #include <linux/delay.h>
  #include "dm_services.h"
  #include "dc.h"
  #include "dc_bios_types.h"
@@@ -666,26 -669,7 +669,26 @@@ void dce110_enable_stream(struct pipe_c
  
        /* update AVI info frame (HDMI, DP)*/
        /* TODO: FPGA may change to hwss.update_info_frame */
 -      dce110_update_info_frame(pipe_ctx);
 +
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +      if (pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL &&
 +                      pipe_ctx->plane_res.hubp != NULL) {
 +              if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
 +                      /* if using dynamic meta, don't set up generic infopackets */
 +                      pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
 +                      pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata(
 +                                      pipe_ctx->stream_res.stream_enc,
 +                                      true, pipe_ctx->plane_res.hubp->inst,
 +                                      dc_is_dp_signal(pipe_ctx->stream->signal) ?
 +                                                      dmdata_dp : dmdata_hdmi);
 +              } else {
 +                      pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata(
 +                                      pipe_ctx->stream_res.stream_enc,
 +                                      false, pipe_ctx->plane_res.hubp->inst,
 +                                      dc_is_dp_signal(pipe_ctx->stream->signal) ?
 +                                                      dmdata_dp : dmdata_hdmi);
 +              }
 +      }
 +#endif
  
        /* enable early control to avoid corruption on DP monitor*/
        active_total_with_borders =
@@@ -958,12 -942,26 +961,12 @@@ void hwss_edp_backlight_control
                edp_receiver_ready_T9(link);
  }
  
 -// Static helper function which calls the correct function
 -// based on pp_smu version
 -static void set_pme_wa_enable_by_version(struct dc *dc)
 -{
 -      struct pp_smu_funcs *pp_smu = NULL;
 -
 -      if (dc->res_pool->pp_smu)
 -              pp_smu = dc->res_pool->pp_smu;
 -
 -      if (pp_smu) {
 -              if (pp_smu->ctx.ver == PP_SMU_VER_RV && pp_smu->rv_funcs.set_pme_wa_enable)
 -                      pp_smu->rv_funcs.set_pme_wa_enable(&(pp_smu->ctx));
 -      }
 -}
 -
  void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
  {
        /* notify audio driver for audio modes of monitor */
        struct dc *core_dc = pipe_ctx->stream->ctx->dc;
        struct pp_smu_funcs *pp_smu = NULL;
 +      struct clk_mgr *clk_mgr = core_dc->clk_mgr;
        unsigned int i, num_audio = 1;
  
        if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true)
  
                pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
  
 -              if (num_audio >= 1 && pp_smu != NULL)
 +              if (num_audio >= 1 && clk_mgr->funcs->enable_pme_wa)
                        /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
 -                      set_pme_wa_enable_by_version(core_dc);
 +                      clk_mgr->funcs->enable_pme_wa(clk_mgr);
                /* un-mute audio */
                /* TODO: audio should be per stream rather than per link */
                pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
@@@ -997,7 -995,6 +1000,7 @@@ void dce110_disable_audio_stream(struc
  {
        struct dc *dc = pipe_ctx->stream->ctx->dc;
        struct pp_smu_funcs *pp_smu = NULL;
 +      struct clk_mgr *clk_mgr = dc->clk_mgr;
  
        if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false)
                return;
                        update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
                        pipe_ctx->stream_res.audio = NULL;
                }
 -              if (pp_smu != NULL)
 +              if (clk_mgr->funcs->enable_pme_wa)
                        /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
 -                      set_pme_wa_enable_by_version(dc);
 +                      clk_mgr->funcs->enable_pme_wa(clk_mgr);
  
                /* TODO: notify audio driver for if audio modes list changed
                 * add audio mode list change flag */
@@@ -1343,9 -1340,6 +1346,9 @@@ static enum dc_status apply_single_cont
        struct dc_stream_state *stream = pipe_ctx->stream;
        struct drr_params params = {0};
        unsigned int event_triggers = 0;
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +      struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
 +#endif
  
        if (dc->hwss.disable_stream_gating) {
                dc->hwss.disable_stream_gating(dc, pipe_ctx);
                pipe_ctx->stream_res.opp,
                &stream->bit_depth_params,
                &stream->clamping);
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +      if (odm_pipe) {
 +              odm_pipe->stream_res.opp->funcs->opp_set_dyn_expansion(
 +                              odm_pipe->stream_res.opp,
 +                              COLOR_SPACE_YCBCR601,
 +                              stream->timing.display_color_depth,
 +                              stream->signal);
 +
 +              odm_pipe->stream_res.opp->funcs->opp_program_fmt(
 +                              odm_pipe->stream_res.opp,
 +                              &stream->bit_depth_params,
 +                              &stream->clamping);
 +      }
 +#endif
  
        if (!stream->dpms_off)
                core_link_enable_stream(context, pipe_ctx);
@@@ -1530,18 -1510,6 +1533,18 @@@ static void disable_vga_and_power_gate_
        }
  }
  
 +
 +static struct dc_stream_state *get_edp_stream(struct dc_state *context)
 +{
 +      int i;
 +
 +      for (i = 0; i < context->stream_count; i++) {
 +              if (context->streams[i]->signal == SIGNAL_TYPE_EDP)
 +                      return context->streams[i];
 +      }
 +      return NULL;
 +}
 +
  static struct dc_link *get_edp_link(struct dc *dc)
  {
        int i;
@@@ -1585,16 -1553,12 +1588,16 @@@ void dce110_enable_accelerated_mode(str
        int i;
        struct dc_link *edp_link_with_sink = get_edp_link_with_sink(dc, context);
        struct dc_link *edp_link = get_edp_link(dc);
 +      struct dc_stream_state *edp_stream = NULL;
        bool can_apply_edp_fast_boot = false;
        bool can_apply_seamless_boot = false;
 +      bool keep_edp_vdd_on = false;
  
        if (dc->hwss.init_pipes)
                dc->hwss.init_pipes(dc, context);
  
 +      edp_stream = get_edp_stream(context);
 +
        // Check fastboot support, disable on DCE8 because of blank screens
        if (edp_link && dc->ctx->dce_version != DCE_VERSION_8_0 &&
                    dc->ctx->dce_version != DCE_VERSION_8_1 &&
  
                // enable fastboot if backend is enabled on eDP
                if (edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc)) {
 -                      /* Find eDP stream and set optimization flag */
 -                      for (i = 0; i < context->stream_count; i++) {
 -                              if (context->streams[i]->signal == SIGNAL_TYPE_EDP) {
 -                                      context->streams[i]->apply_edp_fast_boot_optimization = true;
 -                                      can_apply_edp_fast_boot = true;
 -                                      break;
 -                              }
 +                      /* Set optimization flag on eDP stream*/
 +                      /* Set optimization flag on eDP stream */
 +                              edp_stream->apply_edp_fast_boot_optimization = true;
 +                              can_apply_edp_fast_boot = true;
                        }
                }
 +
 +              // We are trying to enable eDP, don't power down VDD
 +              if (edp_stream)
 +                      keep_edp_vdd_on = true;
        }
  
        // Check seamless boot support
         * it should get turned off
         */
        if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) {
 -              if (edp_link_with_sink) {
 +              if (edp_link_with_sink && !keep_edp_vdd_on) {
                        /*turn off backlight before DP_blank and encoder powered down*/
                        dc->hwss.edp_backlight_control(edp_link_with_sink, false);
                }
                /*resume from S3, no vbios posting, no need to power down again*/
                power_down_all_hw_blocks(dc);
                disable_vga_and_power_gate_all_controllers(dc);
 -              if (edp_link_with_sink)
 +              if (edp_link_with_sink && !keep_edp_vdd_on)
                        dc->hwss.edp_power_control(edp_link_with_sink, false);
        }
        bios_set_scratch_acc_mode_change(dc->ctx->dc_bios);
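
The net effect of the new keep_edp_vdd_on flag can be modelled as a single predicate; a sketch of the power-down decision above (names are illustrative, not DC API):

static bool should_power_down_edp(bool fast_boot, bool seamless_boot,
				  bool have_edp_link_with_sink,
				  bool enabling_edp_stream)
{
	/* Only cut backlight/VDD when no boot optimization applies and
	 * no eDP stream is being brought up in this context. */
	return !fast_boot && !seamless_boot &&
	       have_edp_link_with_sink && !enabling_edp_stream;
}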
@@@ -24,6 -24,8 +24,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  
  
@@@ -1163,6 -1165,16 +1165,6 @@@ static bool construct
        if (!resource_construct(num_virtual_links, dc, &pool->base, res_funcs))
                goto res_create_fail;
  
 -      /*
 -       * This is a bit of a hack. The xGMI enabled info is used to determine
 -       * if audio and display clocks need to be adjusted with the WAFL link's
 -       * SS info. This is a responsiblity of the clk_mgr. But since MMHUB is
 -       * under hwseq, and the relevant register is in MMHUB, we have to do it
 -       * here.
 -       */
 -      if (is_vg20 && dce121_xgmi_enabled(dc->hwseq))
 -              dce121_clock_patch_xgmi_ss_info(dc->clk_mgr);
 -
        /* Create hardware sequencer */
        if (!dce120_hw_sequencer_create(dc))
                goto controller_create_fail;
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/delay.h>
  #include "dm_services.h"
  #include "dcn10_hubp.h"
  #include "dcn10_hubbub.h"
@@@ -145,7 -147,6 +147,7 @@@ bool hubbub1_verify_allow_pstate_change
                forced_pstate_allow = false;
        }
  
 +#ifdef CONFIG_DRM_AMD_DC_DCN1_01
        /* RV2:
         * dchubbubdebugind, at: 0xB
         * description
         * 29:    WB1 Allow Pstate Change
         * 30:    Arbiter's allow_pstate_change
         * 31:    SOC pstate change request"
 -       *
 -       * RV1:
 +       */
 +#else
 +#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 +      /* DCN2.x:
 +       * HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
 +       * 0:  Pipe0 Plane0 Allow P-state Change
 +       * 1:  Pipe0 Plane1 Allow P-state Change
 +       * 2:  Pipe0 Cursor0 Allow P-state Change
 +       * 3:  Pipe0 Cursor1 Allow P-state Change
 +       * 4:  Pipe1 Plane0 Allow P-state Change
 +       * 5:  Pipe1 Plane1 Allow P-state Change
 +       * 6:  Pipe1 Cursor0 Allow P-state Change
 +       * 7:  Pipe1 Cursor1 Allow P-state Change
 +       * 8:  Pipe2 Plane0 Allow P-state Change
 +       * 9:  Pipe2 Plane1 Allow P-state Change
 +       * 10: Pipe2 Cursor0 Allow P-state Change
 +       * 11: Pipe2 Cursor1 Allow P-state Change
 +       * 12: Pipe3 Plane0 Allow P-state Change
 +       * 13: Pipe3 Plane1 Allow P-state Change
 +       * 14: Pipe3 Cursor0 Allow P-state Change
 +       * 15: Pipe3 Cursor1 Allow P-state Change
 +       * 16: Pipe4 Plane0 Allow P-state Change
 +       * 17: Pipe4 Plane1 Allow P-state Change
 +       * 18: Pipe4 Cursor0 Allow P-state Change
 +       * 19: Pipe4 Cursor1 Allow P-state Change
 +       * 20: Pipe5 Plane0 Allow P-state Change
 +       * 21: Pipe5 Plane1 Allow P-state Change
 +       * 22: Pipe5 Cursor0 Allow P-state Change
 +       * 23: Pipe5 Cursor1 Allow P-state Change
 +       * 24: Pipe6 Plane0 Allow P-state Change
 +       * 25: Pipe6 Plane1 Allow P-state Change
 +       * 26: Pipe6 Cursor0 Allow P-state Change
 +       * 27: Pipe6 Cursor1 Allow P-state Change
 +       * 28: WB0 Allow P-state Change
 +       * 29: WB1 Allow P-state Change
 +       * 30: Arbiter's Allow P-state Change
 +       * 31: SOC P-state Change request
 +       */
 +#else
 +      /* RV1:
         * dchubbubdebugind, at: 0x7
         * description "3-0:   Pipe0 cursor0 QOS
         * 7-4:   Pipe1 cursor0 QOS
         * 30:    Arbiter's allow_pstate_change
         * 31:    SOC pstate change request
         */
 +#endif
 +#endif
  
        REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub1->debug_test_index_pstate);
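Given the DCN2.x layout above (four allow bits per pipe, pipes 0..6 in bits 0..27), the debug word can be decoded as follows; a stand-alone sketch, not part of DC:

#include <stdbool.h>
#include <stdint.h>

/* bit = pipe * 4 + slot; slot 0/1 = plane0/plane1, 2/3 = cursor0/cursor1 */
static bool pipe_allows_pstate(uint32_t dbg, unsigned int pipe,
			       unsigned int slot)
{
	return dbg & (1u << (pipe * 4 + slot));
}

static bool arbiter_allows_pstate(uint32_t dbg)
{
	/* bit 30 is the arbiter; bit 31 is the SOC request */
	return dbg & (1u << 30);
}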
  
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  #include "dcn10_ipp.h"
  #include "reg_helper.h"
@@@ -51,12 -53,6 +53,12 @@@ static const struct ipp_funcs dcn10_ipp
        .ipp_destroy                    = dcn10_ipp_destroy
  };
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +static const struct ipp_funcs dcn20_ipp_funcs = {
 +      .ipp_destroy                    = dcn10_ipp_destroy
 +};
 +#endif
 +
  void dcn10_ipp_construct(
        struct dcn10_ipp *ippn10,
        struct dc_context *ctx,
        ippn10->ipp_mask = ipp_mask;
  }
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +void dcn20_ipp_construct(
 +      struct dcn10_ipp *ippn10,
 +      struct dc_context *ctx,
 +      int inst,
 +      const struct dcn10_ipp_registers *regs,
 +      const struct dcn10_ipp_shift *ipp_shift,
 +      const struct dcn10_ipp_mask *ipp_mask)
 +{
 +      ippn10->base.ctx = ctx;
 +      ippn10->base.inst = inst;
 +      ippn10->base.funcs = &dcn20_ipp_funcs;
 +
 +      ippn10->regs = regs;
 +      ippn10->ipp_shift = ipp_shift;
 +      ippn10->ipp_mask = ipp_mask;
 +}
 +#endif
@@@ -23,6 -23,9 +23,9 @@@
   *
   */
  
+ #include <linux/delay.h>
+ #include <linux/slab.h>
  #include "reg_helper.h"
  
  #include "core_types.h"
@@@ -229,9 -232,7 +232,9 @@@ static void setup_panel_mode
  {
        uint32_t value;
  
 -      ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
 +      if (!REG(DP_DPHY_INTERNAL_CTRL))
 +              return;
 +
        value = REG_READ(DP_DPHY_INTERNAL_CTRL);
  
        switch (panel_mode) {
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  #include "dcn10_opp.h"
  #include "reg_helper.h"
@@@ -365,11 -367,6 +367,11 @@@ void opp1_program_oppbuf
         */
        REG_UPDATE(OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, oppbuf->pixel_repetition);
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +      /* Controls the number of padded pixels at the end of a segment */
 +      if (REG(OPPBUF_CONTROL1))
 +              REG_UPDATE(OPPBUF_CONTROL1, OPPBUF_NUM_SEGMENT_PADDED_PIXELS, oppbuf->num_segment_padded_pixels);
 +#endif
  }
  
  void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable)
@@@ -396,9 -393,6 +398,9 @@@ static const struct opp_funcs dcn10_opp
                .opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
                .opp_program_stereo = opp1_program_stereo,
                .opp_pipe_clock_control = opp1_pipe_clock_control,
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +              .opp_set_disp_pattern_generator = NULL,
 +#endif
                .opp_destroy = opp1_destroy
  };
  
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  #include "dc.h"
  
@@@ -560,7 -562,6 +562,7 @@@ static const struct dc_debug_options de
                .az_endpoint_mute_only = true,
                .recovery_enabled = false, /*enable this by default after testing.*/
                .max_downscale_src_width = 3840,
 +              .underflow_assert_delay_us = 0xFFFFFFFF,
  };
  
  static const struct dc_debug_options debug_defaults_diags = {
                .clock_trace = true,
                .disable_stutter = true,
                .disable_pplib_clock_request = true,
 -              .disable_pplib_wm_range = true
 +              .disable_pplib_wm_range = true,
 +              .underflow_assert_delay_us = 0xFFFFFFFF,
  };
  
  static void dcn10_dpp_destroy(struct dpp **dpp)
index 23362dd,0000000..51a3dfe
mode 100644,000000..100644
--- /dev/null
@@@ -1,157 -1,0 +1,159 @@@
 +/*
 + * Copyright 2018 Advanced Micro Devices, Inc.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 + * OTHER DEALINGS IN THE SOFTWARE.
 + *
 + * Authors: AMD
 + *
 + */
 +
++#include <linux/slab.h>
++
 +#include "reg_helper.h"
 +#include "core_types.h"
 +#include "dcn20_dccg.h"
 +
 +#define TO_DCN_DCCG(dccg)\
 +      container_of(dccg, struct dcn_dccg, base)
 +
 +#define REG(reg) \
 +      (dccg_dcn->regs->reg)
 +
 +#undef FN
 +#define FN(reg_name, field_name) \
 +      dccg_dcn->dccg_shift->field_name, dccg_dcn->dccg_mask->field_name
 +
 +#define CTX \
 +      dccg_dcn->base.ctx
 +#define DC_LOGGER \
 +      dccg->ctx->logger
 +
 +void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
 +{
 +      struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
 +
 +      if (dccg->ref_dppclk && req_dppclk) {
 +              int ref_dppclk = dccg->ref_dppclk;
 +
 +              ASSERT(req_dppclk <= ref_dppclk);
 +              /* need to clamp to 8 bits */
 +              if (ref_dppclk > 0xff) {
 +                      int divider = (ref_dppclk + 0xfe) / 0xff;
 +
 +                      ref_dppclk /= divider;
 +                      req_dppclk = (req_dppclk + divider - 1) / divider;
 +                      if (req_dppclk > ref_dppclk)
 +                              req_dppclk = ref_dppclk;
 +              }
 +              REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
 +                              DPPCLK0_DTO_PHASE, req_dppclk,
 +                              DPPCLK0_DTO_MODULO, ref_dppclk);
 +              REG_UPDATE(DPPCLK_DTO_CTRL,
 +                              DPPCLK_DTO_ENABLE[dpp_inst], 1);
 +      } else {
 +              REG_UPDATE(DPPCLK_DTO_CTRL,
 +                              DPPCLK_DTO_ENABLE[dpp_inst], 0);
 +      }
 +}
 +
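Worked example of the 8-bit clamp in dccg2_update_dpp_dto(): with ref_dppclk = 600 and req_dppclk = 450 (arbitrary units), divider = (600 + 0xfe) / 0xff = 3, giving modulo = 200 and phase = 150, so the DTO still yields 450/600 = 75% of the reference while both fields fit in 8 bits. A stand-alone check of that arithmetic:

#include <assert.h>

int main(void)
{
	int ref = 600, req = 450;

	if (ref > 0xff) {
		int divider = (ref + 0xfe) / 0xff;	/* ceil(ref / 255) */

		ref /= divider;
		req = (req + divider - 1) / divider;
		if (req > ref)
			req = ref;
	}
	assert(ref == 200 && req == 150);
	return 0;
}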
 +void dccg2_get_dccg_ref_freq(struct dccg *dccg,
 +              unsigned int xtalin_freq_inKhz,
 +              unsigned int *dccg_ref_freq_inKhz)
 +{
 +      struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
 +      uint32_t clk_en = 0;
 +      uint32_t clk_sel = 0;
 +
 +      REG_GET_2(REFCLK_CNTL, REFCLK_CLOCK_EN, &clk_en, REFCLK_SRC_SEL, &clk_sel);
 +
 +      if (clk_en != 0) {
 +              // DCN20 has never been validated with a non-xtalin reference
 +              // frequency, and DC has no way to determine what frequency a
 +              // non-xtalin source is actually running at.
 +              ASSERT_CRITICAL(false);
 +      }
 +
 +      *dccg_ref_freq_inKhz = xtalin_freq_inKhz;
 +}
 +
 +void dccg2_init(struct dccg *dccg)
 +{
 +      struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
 +
 +      // Fallthrough intentional to program all available dpp_dto's
 +      switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) {
 +      case 6:
 +              REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1);
 +              /* fall through */
 +      case 5:
 +              REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1);
 +              /* fall through */
 +      case 4:
 +              REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1);
 +              /* fall through */
 +      case 3:
 +              REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1);
 +              /* fall through */
 +      case 2:
 +              REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1);
 +              /* fall through */
 +      case 1:
 +              REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1);
 +              break;
 +      default:
 +              ASSERT(false);
 +              break;
 +      }
 +}
 +
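Since the six case bodies differ only in the DTO index, dccg2_init() is logically a countdown over the pipe count; a plain-C model of that equivalence, with REG_UPDATE stubbed out for illustration:

#define REG_UPDATE(reg, field, val)	/* stub: register write elided */

static void model_dccg2_init(int pipe_count)
{
	int i;

	for (i = pipe_count - 1; i >= 0; i--)
		REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[i], 1);
}

The explicit fallthrough switch keeps each register-macro instantiation visible at compile time, matching the style used elsewhere in the file.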
 +static const struct dccg_funcs dccg2_funcs = {
 +      .update_dpp_dto = dccg2_update_dpp_dto,
 +      .get_dccg_ref_freq = dccg2_get_dccg_ref_freq,
 +      .dccg_init = dccg2_init
 +};
 +
 +struct dccg *dccg2_create(
 +      struct dc_context *ctx,
 +      const struct dccg_registers *regs,
 +      const struct dccg_shift *dccg_shift,
 +      const struct dccg_mask *dccg_mask)
 +{
 +      struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
 +      struct dccg *base;
 +
 +      if (dccg_dcn == NULL) {
 +              BREAK_TO_DEBUGGER();
 +              return NULL;
 +      }
 +
 +      base = &dccg_dcn->base;
 +      base->ctx = ctx;
 +      base->funcs = &dccg2_funcs;
 +
 +      dccg_dcn->regs = regs;
 +      dccg_dcn->dccg_shift = dccg_shift;
 +      dccg_dcn->dccg_mask = dccg_mask;
 +
 +      return &dccg_dcn->base;
 +}
 +
 +void dcn_dccg_destroy(struct dccg **dccg)
 +{
 +      struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(*dccg);
 +
 +      kfree(dccg_dcn);
 +      *dccg = NULL;
 +}
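
Typical lifecycle for the new DCCG object, matching how the DCN20 resource code later in this merge wires it up (sketch; error handling elided):

	struct dccg *dccg = dccg2_create(ctx, &dccg_regs,
					 &dccg_shift, &dccg_mask);

	if (dccg) {
		dccg->funcs->dccg_init(dccg);
		/* ... use the DCCG ... */
		dcn_dccg_destroy(&dccg);	/* kfrees and NULLs the pointer */
	}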
index c5ac259,0000000..a8ba7d1
mode 100644,000000..100644
--- /dev/null
@@@ -1,3175 -1,0 +1,3177 @@@
 +/*
 + * Copyright 2016 Advanced Micro Devices, Inc.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 + * OTHER DEALINGS IN THE SOFTWARE.
 + *
 + * Authors: AMD
 + *
 + */
 +
++#include <linux/slab.h>
++
 +#include "dm_services.h"
 +#include "dc.h"
 +
 +#include "resource.h"
 +#include "include/irq_service_interface.h"
 +#include "dcn20/dcn20_resource.h"
 +
 +#include "dcn10/dcn10_hubp.h"
 +#include "dcn10/dcn10_ipp.h"
 +#include "dcn20_hubbub.h"
 +#include "dcn20_mpc.h"
 +#include "dcn20_hubp.h"
 +#include "irq/dcn20/irq_service_dcn20.h"
 +#include "dcn20_dpp.h"
 +#include "dcn20_optc.h"
 +#include "dcn20_hwseq.h"
 +#include "dce110/dce110_hw_sequencer.h"
 +#include "dcn10/dcn10_resource.h"
 +#include "dcn20_opp.h"
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +#include "dcn20_dsc.h"
 +#endif
 +
 +#include "dcn20_link_encoder.h"
 +#include "dcn20_stream_encoder.h"
 +#include "dce/dce_clock_source.h"
 +#include "dce/dce_audio.h"
 +#include "dce/dce_hwseq.h"
 +#include "virtual/virtual_stream_encoder.h"
 +#include "dce110/dce110_resource.h"
 +#include "dml/display_mode_vba.h"
 +#include "dcn20_dccg.h"
 +#include "dcn20_vmid.h"
 +
 +#include "navi10_ip_offset.h"
 +
 +#include "dcn/dcn_2_0_0_offset.h"
 +#include "dcn/dcn_2_0_0_sh_mask.h"
 +
 +#include "nbio/nbio_2_3_offset.h"
 +
 +#include "dcn20/dcn20_dwb.h"
 +#include "dcn20/dcn20_mmhubbub.h"
 +
 +#include "mmhub/mmhub_2_0_0_offset.h"
 +#include "mmhub/mmhub_2_0_0_sh_mask.h"
 +
 +#include "reg_helper.h"
 +#include "dce/dce_abm.h"
 +#include "dce/dce_dmcu.h"
 +#include "dce/dce_aux.h"
 +#include "dce/dce_i2c.h"
 +#include "vm_helper.h"
 +
 +#include "amdgpu_socbb.h"
 +
 +#define SOC_BOUNDING_BOX_VALID false
 +#define DC_LOGGER_INIT(logger)
 +
 +struct _vcs_dpi_ip_params_st dcn2_0_ip = {
 +      .odm_capable = 1,
 +      .gpuvm_enable = 0,
 +      .hostvm_enable = 0,
 +      .gpuvm_max_page_table_levels = 4,
 +      .hostvm_max_page_table_levels = 4,
 +      .hostvm_cached_page_table_levels = 0,
 +      .pte_group_size_bytes = 2048,
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      .num_dsc = 6,
 +#else
 +      .num_dsc = 0,
 +#endif
 +      .rob_buffer_size_kbytes = 168,
 +      .det_buffer_size_kbytes = 164,
 +      .dpte_buffer_size_in_pte_reqs_luma = 84,
 +      .pde_proc_buffer_size_64k_reqs = 48,
 +      .dpp_output_buffer_pixels = 2560,
 +      .opp_output_buffer_lines = 1,
 +      .pixel_chunk_size_kbytes = 8,
 +      .pte_chunk_size_kbytes = 2,
 +      .meta_chunk_size_kbytes = 2,
 +      .writeback_chunk_size_kbytes = 2,
 +      .line_buffer_size_bits = 789504,
 +      .is_line_buffer_bpp_fixed = 0,
 +      .line_buffer_fixed_bpp = 0,
 +      .dcc_supported = true,
 +      .max_line_buffer_lines = 12,
 +      .writeback_luma_buffer_size_kbytes = 12,
 +      .writeback_chroma_buffer_size_kbytes = 8,
 +      .writeback_chroma_line_buffer_width_pixels = 4,
 +      .writeback_max_hscl_ratio = 1,
 +      .writeback_max_vscl_ratio = 1,
 +      .writeback_min_hscl_ratio = 1,
 +      .writeback_min_vscl_ratio = 1,
 +      .writeback_max_hscl_taps = 12,
 +      .writeback_max_vscl_taps = 12,
 +      .writeback_line_buffer_luma_buffer_size = 0,
 +      .writeback_line_buffer_chroma_buffer_size = 14643,
 +      .cursor_buffer_size = 8,
 +      .cursor_chunk_size = 2,
 +      .max_num_otg = 6,
 +      .max_num_dpp = 6,
 +      .max_num_wb = 1,
 +      .max_dchub_pscl_bw_pix_per_clk = 4,
 +      .max_pscl_lb_bw_pix_per_clk = 2,
 +      .max_lb_vscl_bw_pix_per_clk = 4,
 +      .max_vscl_hscl_bw_pix_per_clk = 4,
 +      .max_hscl_ratio = 8,
 +      .max_vscl_ratio = 8,
 +      .hscl_mults = 4,
 +      .vscl_mults = 4,
 +      .max_hscl_taps = 8,
 +      .max_vscl_taps = 8,
 +      .dispclk_ramp_margin_percent = 1,
 +      .underscan_factor = 1.10,
 +      .min_vblank_lines = 32,
 +      .dppclk_delay_subtotal = 77,
 +      .dppclk_delay_scl_lb_only = 16,
 +      .dppclk_delay_scl = 50,
 +      .dppclk_delay_cnvc_formatter = 8,
 +      .dppclk_delay_cnvc_cursor = 6,
 +      .dispclk_delay_subtotal = 87,
 +      .dcfclk_cstate_latency = 10, // SRExitTime
 +      .max_inter_dcn_tile_repeaters = 8,
 +
 +      .xfc_supported = true,
 +      .xfc_fill_bw_overhead_percent = 10.0,
 +      .xfc_fill_constant_bytes = 0,
 +};
 +
 +struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { 0 };
 +
 +
 +#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
 +      #define mmDP0_DP_DPHY_INTERNAL_CTRL             0x210f
 +      #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX    2
 +      #define mmDP1_DP_DPHY_INTERNAL_CTRL             0x220f
 +      #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX    2
 +      #define mmDP2_DP_DPHY_INTERNAL_CTRL             0x230f
 +      #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX    2
 +      #define mmDP3_DP_DPHY_INTERNAL_CTRL             0x240f
 +      #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX    2
 +      #define mmDP4_DP_DPHY_INTERNAL_CTRL             0x250f
 +      #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX    2
 +      #define mmDP5_DP_DPHY_INTERNAL_CTRL             0x260f
 +      #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX    2
 +      #define mmDP6_DP_DPHY_INTERNAL_CTRL             0x270f
 +      #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX    2
 +#endif
 +
 +
 +enum dcn20_clk_src_array_id {
 +      DCN20_CLK_SRC_PLL0,
 +      DCN20_CLK_SRC_PLL1,
 +      DCN20_CLK_SRC_PLL2,
 +      DCN20_CLK_SRC_PLL3,
 +      DCN20_CLK_SRC_PLL4,
 +      DCN20_CLK_SRC_PLL5,
 +      DCN20_CLK_SRC_TOTAL
 +};
 +
 +/* begin *********************
 + * macros to expand register list macros defined in HW object header files */
 +
 +/* DCN */
 +/* TODO awful hack. fixup dcn20_dwb.h */
 +#undef BASE_INNER
 +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
 +
 +#define BASE(seg) BASE_INNER(seg)
 +
 +#define SR(reg_name)\
 +              .reg_name = BASE(mm ## reg_name ## _BASE_IDX) +  \
 +                                      mm ## reg_name
 +
 +#define SRI(reg_name, block, id)\
 +      .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 +                                      mm ## block ## id ## _ ## reg_name
 +
 +#define SRIR(var_name, reg_name, block, id)\
 +      .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 +                                      mm ## block ## id ## _ ## reg_name
 +
 +#define SRII(reg_name, block, id)\
 +      .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 +                                      mm ## block ## id ## _ ## reg_name
 +
 +#define DCCG_SRII(reg_name, block, id)\
 +      .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 +                                      mm ## block ## id ## _ ## reg_name
 +
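These helpers rely on the usual two-level token-pasting trick: the outer BASE()/NBIO_BASE()/MMHUB_BASE() wrappers force the _BASE_IDX argument to expand to its numeric value before the inner macro pastes it onto the segment symbol. Using the fallback defines earlier in this file (the value of DCN_BASE__INST0_SEG2 itself comes from navi10_ip_offset.h):

	/* SRI(DP_DPHY_INTERNAL_CTRL, DP, 0) expands to:
	 *
	 *   .DP_DPHY_INTERNAL_CTRL =
	 *        DCN_BASE__INST0_SEG2          // BASE(2), since
	 *                                      // mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX == 2
	 *      + mmDP0_DP_DPHY_INTERNAL_CTRL   // == 0x210f
	 */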
 +/* NBIO */
 +#define NBIO_BASE_INNER(seg) \
 +      NBIO_BASE__INST0_SEG ## seg
 +
 +#define NBIO_BASE(seg) \
 +      NBIO_BASE_INNER(seg)
 +
 +#define NBIO_SR(reg_name)\
 +              .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
 +                                      mm ## reg_name
 +
 +/* MMHUB */
 +#define MMHUB_BASE_INNER(seg) \
 +      MMHUB_BASE__INST0_SEG ## seg
 +
 +#define MMHUB_BASE(seg) \
 +      MMHUB_BASE_INNER(seg)
 +
 +#define MMHUB_SR(reg_name)\
 +              .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \
 +                                      mmMM ## reg_name
 +
 +static const struct bios_registers bios_regs = {
 +              NBIO_SR(BIOS_SCRATCH_3),
 +              NBIO_SR(BIOS_SCRATCH_6)
 +};
 +
 +#define clk_src_regs(index, pllid)\
 +[index] = {\
 +      CS_COMMON_REG_LIST_DCN2_0(index, pllid),\
 +}
 +
 +static const struct dce110_clk_src_regs clk_src_regs[] = {
 +      clk_src_regs(0, A),
 +      clk_src_regs(1, B),
 +      clk_src_regs(2, C),
 +      clk_src_regs(3, D),
 +      clk_src_regs(4, E),
 +      clk_src_regs(5, F)
 +};
 +
 +static const struct dce110_clk_src_shift cs_shift = {
 +              CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
 +};
 +
 +static const struct dce110_clk_src_mask cs_mask = {
 +              CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
 +};
 +
 +static const struct dce_dmcu_registers dmcu_regs = {
 +              DMCU_DCN10_REG_LIST()
 +};
 +
 +static const struct dce_dmcu_shift dmcu_shift = {
 +              DMCU_MASK_SH_LIST_DCN10(__SHIFT)
 +};
 +
 +static const struct dce_dmcu_mask dmcu_mask = {
 +              DMCU_MASK_SH_LIST_DCN10(_MASK)
 +};
 +
 +static const struct dce_abm_registers abm_regs = {
 +              ABM_DCN20_REG_LIST()
 +};
 +
 +static const struct dce_abm_shift abm_shift = {
 +              ABM_MASK_SH_LIST_DCN20(__SHIFT)
 +};
 +
 +static const struct dce_abm_mask abm_mask = {
 +              ABM_MASK_SH_LIST_DCN20(_MASK)
 +};
 +
 +#define audio_regs(id)\
 +[id] = {\
 +              AUD_COMMON_REG_LIST(id)\
 +}
 +
 +static const struct dce_audio_registers audio_regs[] = {
 +      audio_regs(0),
 +      audio_regs(1),
 +      audio_regs(2),
 +      audio_regs(3),
 +      audio_regs(4),
 +      audio_regs(5),
 +      audio_regs(6),
 +};
 +
 +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
 +              SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
 +              SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
 +              AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
 +
 +static const struct dce_audio_shift audio_shift = {
 +              DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
 +};
 +
 +static const struct dce_aduio_mask audio_mask = {
 +              DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
 +};
 +
 +#define stream_enc_regs(id)\
 +[id] = {\
 +      SE_DCN2_REG_LIST(id)\
 +}
 +
 +static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
 +      stream_enc_regs(0),
 +      stream_enc_regs(1),
 +      stream_enc_regs(2),
 +      stream_enc_regs(3),
 +      stream_enc_regs(4),
 +      stream_enc_regs(5),
 +};
 +
 +static const struct dcn10_stream_encoder_shift se_shift = {
 +              SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT)
 +};
 +
 +static const struct dcn10_stream_encoder_mask se_mask = {
 +              SE_COMMON_MASK_SH_LIST_DCN20(_MASK)
 +};
 +
 +
 +#define aux_regs(id)\
 +[id] = {\
 +      DCN2_AUX_REG_LIST(id)\
 +}
 +
 +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
 +              aux_regs(0),
 +              aux_regs(1),
 +              aux_regs(2),
 +              aux_regs(3),
 +              aux_regs(4),
 +              aux_regs(5)
 +};
 +
 +#define hpd_regs(id)\
 +[id] = {\
 +      HPD_REG_LIST(id)\
 +}
 +
 +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
 +              hpd_regs(0),
 +              hpd_regs(1),
 +              hpd_regs(2),
 +              hpd_regs(3),
 +              hpd_regs(4),
 +              hpd_regs(5)
 +};
 +
 +#define link_regs(id, phyid)\
 +[id] = {\
 +      LE_DCN10_REG_LIST(id), \
 +      UNIPHY_DCN2_REG_LIST(phyid), \
 +      SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
 +}
 +
 +static const struct dcn10_link_enc_registers link_enc_regs[] = {
 +      link_regs(0, A),
 +      link_regs(1, B),
 +      link_regs(2, C),
 +      link_regs(3, D),
 +      link_regs(4, E),
 +      link_regs(5, F)
 +};
 +
 +static const struct dcn10_link_enc_shift le_shift = {
 +      LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT)
 +};
 +
 +static const struct dcn10_link_enc_mask le_mask = {
 +      LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK)
 +};
 +
 +#define ipp_regs(id)\
 +[id] = {\
 +      IPP_REG_LIST_DCN20(id),\
 +}
 +
 +static const struct dcn10_ipp_registers ipp_regs[] = {
 +      ipp_regs(0),
 +      ipp_regs(1),
 +      ipp_regs(2),
 +      ipp_regs(3),
 +      ipp_regs(4),
 +      ipp_regs(5),
 +};
 +
 +static const struct dcn10_ipp_shift ipp_shift = {
 +              IPP_MASK_SH_LIST_DCN20(__SHIFT)
 +};
 +
 +static const struct dcn10_ipp_mask ipp_mask = {
 +              IPP_MASK_SH_LIST_DCN20(_MASK),
 +};
 +
 +#define opp_regs(id)\
 +[id] = {\
 +      OPP_REG_LIST_DCN20(id),\
 +}
 +
 +static const struct dcn20_opp_registers opp_regs[] = {
 +      opp_regs(0),
 +      opp_regs(1),
 +      opp_regs(2),
 +      opp_regs(3),
 +      opp_regs(4),
 +      opp_regs(5),
 +};
 +
 +static const struct dcn20_opp_shift opp_shift = {
 +              OPP_MASK_SH_LIST_DCN20(__SHIFT)
 +};
 +
 +static const struct dcn20_opp_mask opp_mask = {
 +              OPP_MASK_SH_LIST_DCN20(_MASK)
 +};
 +
 +#define aux_engine_regs(id)\
 +[id] = {\
 +      AUX_COMMON_REG_LIST0(id), \
 +      .AUXN_IMPCAL = 0, \
 +      .AUXP_IMPCAL = 0, \
 +      .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
 +}
 +
 +static const struct dce110_aux_registers aux_engine_regs[] = {
 +              aux_engine_regs(0),
 +              aux_engine_regs(1),
 +              aux_engine_regs(2),
 +              aux_engine_regs(3),
 +              aux_engine_regs(4),
 +              aux_engine_regs(5)
 +};
 +
 +#define tf_regs(id)\
 +[id] = {\
 +      TF_REG_LIST_DCN20(id),\
 +}
 +
 +static const struct dcn2_dpp_registers tf_regs[] = {
 +      tf_regs(0),
 +      tf_regs(1),
 +      tf_regs(2),
 +      tf_regs(3),
 +      tf_regs(4),
 +      tf_regs(5),
 +};
 +
 +static const struct dcn2_dpp_shift tf_shift = {
 +              TF_REG_LIST_SH_MASK_DCN20(__SHIFT)
 +};
 +
 +static const struct dcn2_dpp_mask tf_mask = {
 +              TF_REG_LIST_SH_MASK_DCN20(_MASK)
 +};
 +
 +#define dwbc_regs_dcn2(id)\
 +[id] = {\
 +      DWBC_COMMON_REG_LIST_DCN2_0(id),\
 +}
 +
 +static const struct dcn20_dwbc_registers dwbc20_regs[] = {
 +      dwbc_regs_dcn2(0),
 +};
 +
 +static const struct dcn20_dwbc_shift dwbc20_shift = {
 +      DWBC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
 +};
 +
 +static const struct dcn20_dwbc_mask dwbc20_mask = {
 +      DWBC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
 +};
 +
 +#define mcif_wb_regs_dcn2(id)\
 +[id] = {\
 +      MCIF_WB_COMMON_REG_LIST_DCN2_0(id),\
 +}
 +
 +static const struct dcn20_mmhubbub_registers mcif_wb20_regs[] = {
 +      mcif_wb_regs_dcn2(0),
 +};
 +
 +static const struct dcn20_mmhubbub_shift mcif_wb20_shift = {
 +      MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
 +};
 +
 +static const struct dcn20_mmhubbub_mask mcif_wb20_mask = {
 +      MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
 +};
 +
 +static const struct dcn20_mpc_registers mpc_regs = {
 +              MPC_REG_LIST_DCN2_0(0),
 +              MPC_REG_LIST_DCN2_0(1),
 +              MPC_REG_LIST_DCN2_0(2),
 +              MPC_REG_LIST_DCN2_0(3),
 +              MPC_REG_LIST_DCN2_0(4),
 +              MPC_REG_LIST_DCN2_0(5),
 +              MPC_OUT_MUX_REG_LIST_DCN2_0(0),
 +              MPC_OUT_MUX_REG_LIST_DCN2_0(1),
 +              MPC_OUT_MUX_REG_LIST_DCN2_0(2),
 +              MPC_OUT_MUX_REG_LIST_DCN2_0(3),
 +              MPC_OUT_MUX_REG_LIST_DCN2_0(4),
 +              MPC_OUT_MUX_REG_LIST_DCN2_0(5),
 +};
 +
 +static const struct dcn20_mpc_shift mpc_shift = {
 +      MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
 +};
 +
 +static const struct dcn20_mpc_mask mpc_mask = {
 +      MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
 +};
 +
 +#define tg_regs(id)\
 +[id] = {TG_COMMON_REG_LIST_DCN2_0(id)}
 +
 +
 +static const struct dcn_optc_registers tg_regs[] = {
 +      tg_regs(0),
 +      tg_regs(1),
 +      tg_regs(2),
 +      tg_regs(3),
 +      tg_regs(4),
 +      tg_regs(5)
 +};
 +
 +static const struct dcn_optc_shift tg_shift = {
 +      TG_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
 +};
 +
 +static const struct dcn_optc_mask tg_mask = {
 +      TG_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
 +};
 +
 +#define hubp_regs(id)\
 +[id] = {\
 +      HUBP_REG_LIST_DCN20(id)\
 +}
 +
 +static const struct dcn_hubp2_registers hubp_regs[] = {
 +              hubp_regs(0),
 +              hubp_regs(1),
 +              hubp_regs(2),
 +              hubp_regs(3),
 +              hubp_regs(4),
 +              hubp_regs(5)
 +};
 +
 +static const struct dcn_hubp2_shift hubp_shift = {
 +              HUBP_MASK_SH_LIST_DCN20(__SHIFT)
 +};
 +
 +static const struct dcn_hubp2_mask hubp_mask = {
 +              HUBP_MASK_SH_LIST_DCN20(_MASK)
 +};
 +
 +static const struct dcn_hubbub_registers hubbub_reg = {
 +              HUBBUB_REG_LIST_DCN20(0)
 +};
 +
 +static const struct dcn_hubbub_shift hubbub_shift = {
 +              HUBBUB_MASK_SH_LIST_DCN20(__SHIFT)
 +};
 +
 +static const struct dcn_hubbub_mask hubbub_mask = {
 +              HUBBUB_MASK_SH_LIST_DCN20(_MASK)
 +};
 +
 +#define vmid_regs(id)\
 +[id] = {\
 +              DCN20_VMID_REG_LIST(id)\
 +}
 +
 +static const struct dcn_vmid_registers vmid_regs[] = {
 +      vmid_regs(0),
 +      vmid_regs(1),
 +      vmid_regs(2),
 +      vmid_regs(3),
 +      vmid_regs(4),
 +      vmid_regs(5),
 +      vmid_regs(6),
 +      vmid_regs(7),
 +      vmid_regs(8),
 +      vmid_regs(9),
 +      vmid_regs(10),
 +      vmid_regs(11),
 +      vmid_regs(12),
 +      vmid_regs(13),
 +      vmid_regs(14),
 +      vmid_regs(15)
 +};
 +
 +static const struct dcn20_vmid_shift vmid_shifts = {
 +              DCN20_VMID_MASK_SH_LIST(__SHIFT)
 +};
 +
 +static const struct dcn20_vmid_mask vmid_masks = {
 +              DCN20_VMID_MASK_SH_LIST(_MASK)
 +};
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +#define dsc_regsDCN20(id)\
 +[id] = {\
 +      DSC_REG_LIST_DCN20(id)\
 +}
 +
 +static const struct dcn20_dsc_registers dsc_regs[] = {
 +      dsc_regsDCN20(0),
 +      dsc_regsDCN20(1),
 +      dsc_regsDCN20(2),
 +      dsc_regsDCN20(3),
 +      dsc_regsDCN20(4),
 +      dsc_regsDCN20(5)
 +};
 +
 +static const struct dcn20_dsc_shift dsc_shift = {
 +      DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
 +};
 +
 +static const struct dcn20_dsc_mask dsc_mask = {
 +      DSC_REG_LIST_SH_MASK_DCN20(_MASK)
 +};
 +#endif
 +
 +static const struct dccg_registers dccg_regs = {
 +              DCCG_REG_LIST_DCN2()
 +};
 +
 +static const struct dccg_shift dccg_shift = {
 +              DCCG_MASK_SH_LIST_DCN2(__SHIFT)
 +};
 +
 +static const struct dccg_mask dccg_mask = {
 +              DCCG_MASK_SH_LIST_DCN2(_MASK)
 +};
 +
 +static const struct resource_caps res_cap_nv10 = {
 +              .num_timing_generator = 6,
 +              .num_opp = 6,
 +              .num_video_plane = 6,
 +              .num_audio = 7,
 +              .num_stream_encoder = 6,
 +              .num_pll = 6,
 +              .num_dwb = 1,
 +              .num_ddc = 6,
 +              .num_vmid = 16,
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +              .num_dsc = 6,
 +#endif
 +};
 +
 +static const struct dc_plane_cap plane_cap = {
 +      .type = DC_PLANE_TYPE_DCN_UNIVERSAL,
 +      .blends_with_above = true,
 +      .blends_with_below = true,
 +      .per_pixel_alpha = true,
 +
 +      .pixel_format_support = {
 +                      .argb8888 = true,
 +                      .nv12 = true,
 +                      .fp16 = true
 +      },
 +
 +      .max_upscale_factor = {
 +                      .argb8888 = 16000,
 +                      .nv12 = 16000,
 +                      .fp16 = 1
 +      },
 +
 +      .max_downscale_factor = {
 +                      .argb8888 = 250,
 +                      .nv12 = 250,
 +                      .fp16 = 1
 +      }
 +};
 +
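The plane_cap scale factors are in thousandths (a convention assumed here from how DC consumes these caps): 16000 means up to 16x upscale for ARGB8888/NV12, 250 means the source may be downscaled to 0.25x (4:1), and the fp16 values of 1 effectively disallow scaling of fp16 surfaces. A quick check of that reading:

#include <assert.h>

int main(void)
{
	assert(16000 / 1000 == 16);	/* 16x maximum upscale */
	assert(1000 / 250 == 4);	/* 4:1 maximum downscale */
	return 0;
}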
 +static const struct dc_debug_options debug_defaults_drv = {
 +              .disable_dmcu = true,
 +              .force_abm_enable = false,
 +              .timing_trace = false,
 +              .clock_trace = true,
 +              .disable_pplib_clock_request = true,
 +              .pipe_split_policy = MPC_SPLIT_DYNAMIC,
 +              .force_single_disp_pipe_split = true,
 +              .disable_dcc = DCC_ENABLE,
 +              .vsr_support = true,
 +              .performance_trace = false,
 +              .max_downscale_src_width = 5120, /* up to 5K */
 +              .disable_pplib_wm_range = false,
 +              .scl_reset_length10 = true,
 +              .sanity_checks = false,
 +              .disable_tri_buf = true,
 +              .underflow_assert_delay_us = 0xFFFFFFFF,
 +};
 +
 +static const struct dc_debug_options debug_defaults_diags = {
 +              .disable_dmcu = true,
 +              .force_abm_enable = false,
 +              .timing_trace = true,
 +              .clock_trace = true,
 +              .disable_dpp_power_gate = true,
 +              .disable_hubp_power_gate = true,
 +              .disable_clock_gate = true,
 +              .disable_pplib_clock_request = true,
 +              .disable_pplib_wm_range = true,
 +              .disable_stutter = true,
 +              .scl_reset_length10 = true,
 +              .underflow_assert_delay_us = 0xFFFFFFFF,
 +};
 +
 +void dcn20_dpp_destroy(struct dpp **dpp)
 +{
 +      kfree(TO_DCN20_DPP(*dpp));
 +      *dpp = NULL;
 +}
 +
 +struct dpp *dcn20_dpp_create(
 +      struct dc_context *ctx,
 +      uint32_t inst)
 +{
 +      struct dcn20_dpp *dpp =
 +              kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
 +
 +      if (!dpp)
 +              return NULL;
 +
 +      if (dpp2_construct(dpp, ctx, inst,
 +                      &tf_regs[inst], &tf_shift, &tf_mask))
 +              return &dpp->base;
 +
 +      BREAK_TO_DEBUGGER();
 +      kfree(dpp);
 +      return NULL;
 +}
 +
 +struct input_pixel_processor *dcn20_ipp_create(
 +      struct dc_context *ctx, uint32_t inst)
 +{
 +      struct dcn10_ipp *ipp =
 +              kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
 +
 +      if (!ipp) {
 +              BREAK_TO_DEBUGGER();
 +              return NULL;
 +      }
 +
 +      dcn20_ipp_construct(ipp, ctx, inst,
 +                      &ipp_regs[inst], &ipp_shift, &ipp_mask);
 +      return &ipp->base;
 +}
 +
 +
 +struct output_pixel_processor *dcn20_opp_create(
 +      struct dc_context *ctx, uint32_t inst)
 +{
 +      struct dcn20_opp *opp =
 +              kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
 +
 +      if (!opp) {
 +              BREAK_TO_DEBUGGER();
 +              return NULL;
 +      }
 +
 +      dcn20_opp_construct(opp, ctx, inst,
 +                      &opp_regs[inst], &opp_shift, &opp_mask);
 +      return &opp->base;
 +}
 +
 +struct dce_aux *dcn20_aux_engine_create(
 +      struct dc_context *ctx,
 +      uint32_t inst)
 +{
 +      struct aux_engine_dce110 *aux_engine =
 +              kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
 +
 +      if (!aux_engine)
 +              return NULL;
 +
 +      dce110_aux_engine_construct(aux_engine, ctx, inst,
 +                                  SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
 +                                  &aux_engine_regs[inst]);
 +
 +      return &aux_engine->base;
 +}
 +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
 +
 +static const struct dce_i2c_registers i2c_hw_regs[] = {
 +              i2c_inst_regs(1),
 +              i2c_inst_regs(2),
 +              i2c_inst_regs(3),
 +              i2c_inst_regs(4),
 +              i2c_inst_regs(5),
 +              i2c_inst_regs(6),
 +};
 +
 +static const struct dce_i2c_shift i2c_shifts = {
 +              I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT)
 +};
 +
 +static const struct dce_i2c_mask i2c_masks = {
 +              I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
 +};
 +
 +struct dce_i2c_hw *dcn20_i2c_hw_create(
 +      struct dc_context *ctx,
 +      uint32_t inst)
 +{
 +      struct dce_i2c_hw *dce_i2c_hw =
 +              kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
 +
 +      if (!dce_i2c_hw)
 +              return NULL;
 +
 +      dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
 +                                  &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
 +
 +      return dce_i2c_hw;
 +}
 +struct mpc *dcn20_mpc_create(struct dc_context *ctx)
 +{
 +      struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
 +                                        GFP_KERNEL);
 +
 +      if (!mpc20)
 +              return NULL;
 +
 +      dcn20_mpc_construct(mpc20, ctx,
 +                      &mpc_regs,
 +                      &mpc_shift,
 +                      &mpc_mask,
 +                      6);
 +
 +      return &mpc20->base;
 +}
 +
 +struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
 +{
 +      int i;
 +      struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
 +                                        GFP_KERNEL);
 +
 +      if (!hubbub)
 +              return NULL;
 +
 +      hubbub2_construct(hubbub, ctx,
 +                      &hubbub_reg,
 +                      &hubbub_shift,
 +                      &hubbub_mask);
 +
 +      for (i = 0; i < res_cap_nv10.num_vmid; i++) {
 +              struct dcn20_vmid *vmid = &hubbub->vmid[i];
 +
 +              vmid->ctx = ctx;
 +
 +              vmid->regs = &vmid_regs[i];
 +              vmid->shifts = &vmid_shifts;
 +              vmid->masks = &vmid_masks;
 +      }
 +
 +      return &hubbub->base;
 +}
 +
 +struct timing_generator *dcn20_timing_generator_create(
 +              struct dc_context *ctx,
 +              uint32_t instance)
 +{
 +      struct optc *tgn10 =
 +              kzalloc(sizeof(struct optc), GFP_KERNEL);
 +
 +      if (!tgn10)
 +              return NULL;
 +
 +      tgn10->base.inst = instance;
 +      tgn10->base.ctx = ctx;
 +
 +      tgn10->tg_regs = &tg_regs[instance];
 +      tgn10->tg_shift = &tg_shift;
 +      tgn10->tg_mask = &tg_mask;
 +
 +      dcn20_timing_generator_init(tgn10);
 +
 +      return &tgn10->base;
 +}
 +
 +static const struct encoder_feature_support link_enc_feature = {
 +              .max_hdmi_deep_color = COLOR_DEPTH_121212,
 +              .max_hdmi_pixel_clock = 600000,
 +              .hdmi_ycbcr420_supported = true,
 +              .dp_ycbcr420_supported = true,
 +              .flags.bits.IS_HBR2_CAPABLE = true,
 +              .flags.bits.IS_HBR3_CAPABLE = true,
 +              .flags.bits.IS_TPS3_CAPABLE = true,
 +              .flags.bits.IS_TPS4_CAPABLE = true
 +};
 +
 +struct link_encoder *dcn20_link_encoder_create(
 +      const struct encoder_init_data *enc_init_data)
 +{
 +      struct dcn20_link_encoder *enc20 =
 +              kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
 +
 +      if (!enc20)
 +              return NULL;
 +
 +      dcn20_link_encoder_construct(enc20,
 +                                    enc_init_data,
 +                                    &link_enc_feature,
 +                                    &link_enc_regs[enc_init_data->transmitter],
 +                                    &link_enc_aux_regs[enc_init_data->channel - 1],
 +                                    &link_enc_hpd_regs[enc_init_data->hpd_source],
 +                                    &le_shift,
 +                                    &le_mask);
 +
 +      return &enc20->enc10.base;
 +}
 +
 +struct clock_source *dcn20_clock_source_create(
 +      struct dc_context *ctx,
 +      struct dc_bios *bios,
 +      enum clock_source_id id,
 +      const struct dce110_clk_src_regs *regs,
 +      bool dp_clk_src)
 +{
 +      struct dce110_clk_src *clk_src =
 +              kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
 +
 +      if (!clk_src)
 +              return NULL;
 +
 +      if (dcn20_clk_src_construct(clk_src, ctx, bios, id,
 +                      regs, &cs_shift, &cs_mask)) {
 +              clk_src->base.dp_clk_src = dp_clk_src;
 +              return &clk_src->base;
 +      }
 +
 +      BREAK_TO_DEBUGGER();
 +      return NULL;
 +}
 +
 +static void read_dce_straps(
 +      struct dc_context *ctx,
 +      struct resource_straps *straps)
 +{
 +      generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
 +              FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
 +}
 +
 +static struct audio *dcn20_create_audio(
 +              struct dc_context *ctx, unsigned int inst)
 +{
 +      return dce_audio_create(ctx, inst,
 +                      &audio_regs[inst], &audio_shift, &audio_mask);
 +}
 +
 +struct stream_encoder *dcn20_stream_encoder_create(
 +      enum engine_id eng_id,
 +      struct dc_context *ctx)
 +{
 +      struct dcn10_stream_encoder *enc1 =
 +              kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
 +
 +      if (!enc1)
 +              return NULL;
 +
 +      dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
 +                                      &stream_enc_regs[eng_id],
 +                                      &se_shift, &se_mask);
 +
 +      return &enc1->base;
 +}
 +
 +static const struct dce_hwseq_registers hwseq_reg = {
 +              HWSEQ_DCN2_REG_LIST()
 +};
 +
 +static const struct dce_hwseq_shift hwseq_shift = {
 +              HWSEQ_DCN2_MASK_SH_LIST(__SHIFT)
 +};
 +
 +static const struct dce_hwseq_mask hwseq_mask = {
 +              HWSEQ_DCN2_MASK_SH_LIST(_MASK)
 +};
 +
 +struct dce_hwseq *dcn20_hwseq_create(
 +      struct dc_context *ctx)
 +{
 +      struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
 +
 +      if (hws) {
 +              hws->ctx = ctx;
 +              hws->regs = &hwseq_reg;
 +              hws->shifts = &hwseq_shift;
 +              hws->masks = &hwseq_mask;
 +      }
 +      return hws;
 +}
 +
 +static const struct resource_create_funcs res_create_funcs = {
 +      .read_dce_straps = read_dce_straps,
 +      .create_audio = dcn20_create_audio,
 +      .create_stream_encoder = dcn20_stream_encoder_create,
 +      .create_hwseq = dcn20_hwseq_create,
 +};
 +
 +static const struct resource_create_funcs res_create_maximus_funcs = {
 +      .read_dce_straps = NULL,
 +      .create_audio = NULL,
 +      .create_stream_encoder = NULL,
 +      .create_hwseq = dcn20_hwseq_create,
 +};
 +
 +void dcn20_clock_source_destroy(struct clock_source **clk_src)
 +{
 +      kfree(TO_DCE110_CLK_SRC(*clk_src));
 +      *clk_src = NULL;
 +}
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +
 +struct display_stream_compressor *dcn20_dsc_create(
 +      struct dc_context *ctx, uint32_t inst)
 +{
 +      struct dcn20_dsc *dsc =
 +              kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
 +
 +      if (!dsc) {
 +              BREAK_TO_DEBUGGER();
 +              return NULL;
 +      }
 +
 +      dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
 +      return &dsc->base;
 +}
 +
 +void dcn20_dsc_destroy(struct display_stream_compressor **dsc)
 +{
 +      kfree(container_of(*dsc, struct dcn20_dsc, base));
 +      *dsc = NULL;
 +}
 +
 +#endif
 +
 +static void destruct(struct dcn20_resource_pool *pool)
 +{
 +      unsigned int i;
 +
 +      for (i = 0; i < pool->base.stream_enc_count; i++) {
 +              if (pool->base.stream_enc[i] != NULL) {
 +                      kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
 +                      pool->base.stream_enc[i] = NULL;
 +              }
 +      }
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
 +              if (pool->base.dscs[i] != NULL)
 +                      dcn20_dsc_destroy(&pool->base.dscs[i]);
 +      }
 +#endif
 +
 +      if (pool->base.mpc != NULL) {
 +              kfree(TO_DCN20_MPC(pool->base.mpc));
 +              pool->base.mpc = NULL;
 +      }
 +      if (pool->base.hubbub != NULL) {
 +              kfree(pool->base.hubbub);
 +              pool->base.hubbub = NULL;
 +      }
 +      for (i = 0; i < pool->base.pipe_count; i++) {
 +              if (pool->base.dpps[i] != NULL)
 +                      dcn20_dpp_destroy(&pool->base.dpps[i]);
 +
 +              if (pool->base.ipps[i] != NULL)
 +                      pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
 +
 +              if (pool->base.hubps[i] != NULL) {
 +                      kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
 +                      pool->base.hubps[i] = NULL;
 +              }
 +
 +              if (pool->base.irqs != NULL) {
 +                      dal_irq_service_destroy(&pool->base.irqs);
 +              }
 +      }
 +
 +      for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
 +              if (pool->base.engines[i] != NULL)
 +                      dce110_engine_destroy(&pool->base.engines[i]);
 +              if (pool->base.hw_i2cs[i] != NULL) {
 +                      kfree(pool->base.hw_i2cs[i]);
 +                      pool->base.hw_i2cs[i] = NULL;
 +              }
 +              if (pool->base.sw_i2cs[i] != NULL) {
 +                      kfree(pool->base.sw_i2cs[i]);
 +                      pool->base.sw_i2cs[i] = NULL;
 +              }
 +      }
 +
 +      for (i = 0; i < pool->base.res_cap->num_opp; i++) {
 +              if (pool->base.opps[i] != NULL)
 +                      pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
 +      }
 +
 +      for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
 +              if (pool->base.timing_generators[i] != NULL)    {
 +                      kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
 +                      pool->base.timing_generators[i] = NULL;
 +              }
 +      }
 +
 +      for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
 +              if (pool->base.dwbc[i] != NULL) {
 +                      kfree(TO_DCN20_DWBC(pool->base.dwbc[i]));
 +                      pool->base.dwbc[i] = NULL;
 +              }
 +              if (pool->base.mcif_wb[i] != NULL) {
 +                      kfree(TO_DCN20_MMHUBBUB(pool->base.mcif_wb[i]));
 +                      pool->base.mcif_wb[i] = NULL;
 +              }
 +      }
 +
 +      for (i = 0; i < pool->base.audio_count; i++) {
 +              if (pool->base.audios[i])
 +                      dce_aud_destroy(&pool->base.audios[i]);
 +      }
 +
 +      for (i = 0; i < pool->base.clk_src_count; i++) {
 +              if (pool->base.clock_sources[i] != NULL) {
 +                      dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
 +                      pool->base.clock_sources[i] = NULL;
 +              }
 +      }
 +
 +      if (pool->base.dp_clock_source != NULL) {
 +              dcn20_clock_source_destroy(&pool->base.dp_clock_source);
 +              pool->base.dp_clock_source = NULL;
 +      }
 +
 +
 +      if (pool->base.abm != NULL)
 +              dce_abm_destroy(&pool->base.abm);
 +
 +      if (pool->base.dmcu != NULL)
 +              dce_dmcu_destroy(&pool->base.dmcu);
 +
 +      if (pool->base.dccg != NULL)
 +              dcn_dccg_destroy(&pool->base.dccg);
 +
 +      if (pool->base.pp_smu != NULL)
 +              dcn20_pp_smu_destroy(&pool->base.pp_smu);
 +
 +}
 +
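+/* Allocate and construct the HUBP (pipe front end) for instance 'inst'. */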
 +struct hubp *dcn20_hubp_create(
 +      struct dc_context *ctx,
 +      uint32_t inst)
 +{
 +      struct dcn20_hubp *hubp2 =
 +              kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
 +
 +      if (!hubp2)
 +              return NULL;
 +
 +      if (hubp2_construct(hubp2, ctx, inst,
 +                      &hubp_regs[inst], &hubp_shift, &hubp_mask))
 +              return &hubp2->base;
 +
 +      BREAK_TO_DEBUGGER();
 +      kfree(hubp2);
 +      return NULL;
 +}
 +
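+/*
+ * Derive pixel clock parameters from the stream timing.  The requested
+ * pixel clock is halved when two pixels share one container or when ODM
+ * combine splits the timing across two OPPs, and doubled for HW
+ * frame-packed 3D.  YCbCr 4:2:2 is treated as 8-bit for the clock math.
+ */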
 +static void get_pixel_clock_parameters(
 +      struct pipe_ctx *pipe_ctx,
 +      struct pixel_clk_params *pixel_clk_params)
 +{
 +      const struct dc_stream_state *stream = pipe_ctx->stream;
 +      bool odm_combine = dc_res_get_odm_bottom_pipe(pipe_ctx) != NULL;
 +
 +      pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
 +      pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
 +      pixel_clk_params->signal_type = pipe_ctx->stream->signal;
 +      pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
+      /* TODO: un-hardcode */
 +      pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
 +              LINK_RATE_REF_FREQ_IN_KHZ;
 +      pixel_clk_params->flags.ENABLE_SS = 0;
 +      pixel_clk_params->color_depth =
 +              stream->timing.display_color_depth;
 +      pixel_clk_params->flags.DISPLAY_BLANKED = 1;
 +      pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
 +
 +      if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
 +              pixel_clk_params->color_depth = COLOR_DEPTH_888;
 +
 +      if (optc1_is_two_pixels_per_containter(&stream->timing) || odm_combine)
 +              pixel_clk_params->requested_pix_clk_100hz /= 2;
 +
 +      if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
 +              pixel_clk_params->requested_pix_clk_100hz *= 2;
 +
 +}
 +
 +static void build_clamping_params(struct dc_stream_state *stream)
 +{
 +      stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
 +      stream->clamping.c_depth = stream->timing.display_color_depth;
 +      stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
 +}
 +
 +static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
 +{
 +
 +      get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
 +
 +      pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
 +              pipe_ctx->clock_source,
 +              &pipe_ctx->stream_res.pix_clk_params,
 +              &pipe_ctx->pll_settings);
 +
 +      pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
 +
 +      resource_build_bit_depth_reduction_params(pipe_ctx->stream,
 +                                      &pipe_ctx->stream->bit_depth_params);
 +      build_clamping_params(pipe_ctx->stream);
 +
 +      return DC_OK;
 +}
 +
 +enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
 +{
 +      enum dc_status status = DC_OK;
 +      struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
 +
 +      /*TODO Seems unneeded anymore */
 +      /*      if (old_context && resource_is_stream_unchanged(old_context, stream)) {
 +                      if (stream != NULL && old_context->streams[i] != NULL) {
 +                               todo: shouldn't have to copy missing parameter here
 +                              resource_build_bit_depth_reduction_params(stream,
 +                                              &stream->bit_depth_params);
 +                              stream->clamping.pixel_encoding =
 +                                              stream->timing.pixel_encoding;
 +
 +                              resource_build_bit_depth_reduction_params(stream,
 +                                                              &stream->bit_depth_params);
 +                              build_clamping_params(stream);
 +
 +                              continue;
 +                      }
 +              }
 +      */
 +
 +      if (!pipe_ctx)
 +              return DC_ERROR_UNEXPECTED;
 +
 +
 +      status = build_pipe_hw_param(pipe_ctx);
 +
 +      return status;
 +}
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +
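+/*
+ * Claim the first unused DSC in the pool and mark it acquired; *dsc is
+ * left NULL when every DSC is already taken.
+ */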
 +static void acquire_dsc(struct resource_context *res_ctx,
 +                      const struct resource_pool *pool,
 +                      struct display_stream_compressor **dsc)
 +{
 +      int i;
 +
 +      ASSERT(*dsc == NULL);
 +      *dsc = NULL;
 +
 +      /* Find first free DSC */
 +      for (i = 0; i < pool->res_cap->num_dsc; i++)
 +              if (!res_ctx->is_dsc_acquired[i]) {
 +                      *dsc = pool->dscs[i];
 +                      res_ctx->is_dsc_acquired[i] = true;
 +                      break;
 +              }
 +}
 +
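+/* Return a DSC to the pool and clear the caller's reference. */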
 +static void release_dsc(struct resource_context *res_ctx,
 +                      const struct resource_pool *pool,
 +                      struct display_stream_compressor **dsc)
 +{
 +      int i;
 +
 +      for (i = 0; i < pool->res_cap->num_dsc; i++)
 +              if (pool->dscs[i] == *dsc) {
 +                      res_ctx->is_dsc_acquired[i] = false;
 +                      *dsc = NULL;
 +                      break;
 +              }
 +}
 +
 +#endif
 +
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
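+/*
+ * Attach a DSC to the pipe driving dc_stream.  There can be fewer DSCs
+ * than pipes, so this can legitimately fail with DC_NO_DSC_RESOURCE.
+ */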
 +static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
 +              struct dc_state *dc_ctx,
 +              struct dc_stream_state *dc_stream)
 +{
 +      enum dc_status result = DC_OK;
 +      int i;
 +      const struct resource_pool *pool = dc->res_pool;
 +
 +      /* Get a DSC if required and available */
 +      for (i = 0; i < dc->res_pool->pipe_count; i++) {
 +              struct pipe_ctx *pipe_ctx = &dc_ctx->res_ctx.pipe_ctx[i];
 +
 +              if (pipe_ctx->stream != dc_stream)
 +                      continue;
 +
 +              acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc);
 +
 +              /* The number of DSCs can be less than the number of pipes */
 +              if (!pipe_ctx->stream_res.dsc) {
 +                      dm_output_to_console("No DSCs available\n");
 +                      result = DC_NO_DSC_RESOURCE;
 +              }
 +
 +              break;
 +      }
 +
 +      return result;
 +}
 +
 +
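+/*
+ * Release the DSC held by the top pipe of dc_stream, plus the one on
+ * the ODM bottom pipe when ODM combine is active.
+ */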
 +static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
 +              struct dc_state *new_ctx,
 +              struct dc_stream_state *dc_stream)
 +{
 +      struct pipe_ctx *pipe_ctx = NULL;
 +      int i;
 +
 +      for (i = 0; i < MAX_PIPES; i++) {
 +              if (new_ctx->res_ctx.pipe_ctx[i].stream == dc_stream && !new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
 +                      pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
 +                      break;
 +              }
 +      }
 +
 +      if (!pipe_ctx)
 +              return DC_ERROR_UNEXPECTED;
 +
 +      if (pipe_ctx->stream_res.dsc) {
 +              struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
 +
 +              release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
 +              if (odm_pipe)
 +                      release_dsc(&new_ctx->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
 +      }
 +
 +      return DC_OK;
 +}
 +#endif
 +
 +
 +enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
 +{
 +      enum dc_status result = DC_ERROR_UNEXPECTED;
 +
 +      result = resource_map_pool_resources(dc, new_ctx, dc_stream);
 +
 +      if (result == DC_OK)
 +              result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      /* Get a DSC if required and available */
 +      if (result == DC_OK && dc_stream->timing.flags.DSC)
 +              result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
 +#endif
 +
 +      if (result == DC_OK)
 +              result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream);
 +
 +      return result;
 +}
 +
 +
 +enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
 +{
 +      enum dc_status result = DC_OK;
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream);
 +#endif
 +
 +      return result;
 +}
 +
 +
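+/* Translate a DC swizzle mode into its DML counterpart. */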
 +static void swizzle_to_dml_params(
 +              enum swizzle_mode_values swizzle,
 +              unsigned int *sw_mode)
 +{
 +      switch (swizzle) {
 +      case DC_SW_LINEAR:
 +              *sw_mode = dm_sw_linear;
 +              break;
 +      case DC_SW_4KB_S:
 +              *sw_mode = dm_sw_4kb_s;
 +              break;
 +      case DC_SW_4KB_S_X:
 +              *sw_mode = dm_sw_4kb_s_x;
 +              break;
 +      case DC_SW_4KB_D:
 +              *sw_mode = dm_sw_4kb_d;
 +              break;
 +      case DC_SW_4KB_D_X:
 +              *sw_mode = dm_sw_4kb_d_x;
 +              break;
 +      case DC_SW_64KB_S:
 +              *sw_mode = dm_sw_64kb_s;
 +              break;
 +      case DC_SW_64KB_S_X:
 +              *sw_mode = dm_sw_64kb_s_x;
 +              break;
 +      case DC_SW_64KB_S_T:
 +              *sw_mode = dm_sw_64kb_s_t;
 +              break;
 +      case DC_SW_64KB_D:
 +              *sw_mode = dm_sw_64kb_d;
 +              break;
 +      case DC_SW_64KB_D_X:
 +              *sw_mode = dm_sw_64kb_d_x;
 +              break;
 +      case DC_SW_64KB_D_T:
 +              *sw_mode = dm_sw_64kb_d_t;
 +              break;
 +      case DC_SW_64KB_R_X:
 +              *sw_mode = dm_sw_64kb_r_x;
 +              break;
 +      case DC_SW_VAR_S:
 +              *sw_mode = dm_sw_var_s;
 +              break;
 +      case DC_SW_VAR_S_X:
 +              *sw_mode = dm_sw_var_s_x;
 +              break;
 +      case DC_SW_VAR_D:
 +              *sw_mode = dm_sw_var_d;
 +              break;
 +      case DC_SW_VAR_D_X:
 +              *sw_mode = dm_sw_var_d_x;
 +              break;
 +
 +      default:
 +              ASSERT(0); /* Not supported */
 +              break;
 +      }
 +}
 +
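+/*
+ * Clone primary_pipe into secondary_pipe for MPC or ODM combine.  For
+ * ODM, h_active, viewport and recout are split between the two pipes
+ * (each side needs at least 16 pixels of width) and the secondary pipe
+ * gets its own OPP and, when the stream uses DSC, its own DSC.  For MPC
+ * combine the scaling parameters are simply rebuilt per pipe.  Returns
+ * false when the split cannot be honored.
+ */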
 +static bool dcn20_split_stream_for_combine(
 +              struct resource_context *res_ctx,
 +              const struct resource_pool *pool,
 +              struct pipe_ctx *primary_pipe,
 +              struct pipe_ctx *secondary_pipe,
 +              bool is_odm_combine)
 +{
 +      int pipe_idx = secondary_pipe->pipe_idx;
 +      struct scaler_data *sd = &primary_pipe->plane_res.scl_data;
 +      struct pipe_ctx *sec_bot_pipe = secondary_pipe->bottom_pipe;
 +      int new_width;
 +
 +      *secondary_pipe = *primary_pipe;
 +      secondary_pipe->bottom_pipe = sec_bot_pipe;
 +
 +      secondary_pipe->pipe_idx = pipe_idx;
 +      secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx];
 +      secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx];
 +      secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx];
 +      secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
 +      secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
 +      secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst;
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      secondary_pipe->stream_res.dsc = NULL;
 +#endif
 +      if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) {
 +              ASSERT(!secondary_pipe->bottom_pipe);
 +              secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
 +              secondary_pipe->bottom_pipe->top_pipe = secondary_pipe;
 +      }
 +      primary_pipe->bottom_pipe = secondary_pipe;
 +      secondary_pipe->top_pipe = primary_pipe;
 +
 +      if (is_odm_combine) {
 +              if (primary_pipe->plane_state) {
 +                      /* HACTIVE halved for odm combine */
 +                      sd->h_active /= 2;
 +                      /* Copy scl_data to secondary pipe */
 +                      secondary_pipe->plane_res.scl_data = *sd;
 +
 +                      /* Calculate new vp and recout for left pipe */
 +                      /* Need at least 16 pixels width per side */
 +                      if (sd->recout.x + 16 >= sd->h_active)
 +                              return false;
 +                      new_width = sd->h_active - sd->recout.x;
 +                      sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
 +                                      sd->ratios.horz, sd->recout.width - new_width));
 +                      sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
 +                                      sd->ratios.horz_c, sd->recout.width - new_width));
 +                      sd->recout.width = new_width;
 +
 +                      /* Calculate new vp and recout for right pipe */
 +                      sd = &secondary_pipe->plane_res.scl_data;
 +                      new_width = sd->recout.width + sd->recout.x - sd->h_active;
 +                      /* Need at least 16 pixels width per side */
 +                      if (new_width <= 16)
 +                              return false;
 +                      sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
 +                                      sd->ratios.horz, sd->recout.width - new_width));
 +                      sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
 +                                      sd->ratios.horz_c, sd->recout.width - new_width));
 +                      sd->recout.width = new_width;
 +                      sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int(
 +                                      sd->ratios.horz, sd->h_active - sd->recout.x));
 +                      sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
 +                                      sd->ratios.horz_c, sd->h_active - sd->recout.x));
 +                      sd->recout.x = 0;
 +              }
 +              secondary_pipe->stream_res.opp = pool->opps[secondary_pipe->pipe_idx];
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +              if (secondary_pipe->stream->timing.flags.DSC == 1) {
 +                      acquire_dsc(res_ctx, pool, &secondary_pipe->stream_res.dsc);
 +                      ASSERT(secondary_pipe->stream_res.dsc);
 +                      if (secondary_pipe->stream_res.dsc == NULL)
 +                              return false;
 +              }
 +#endif
 +      } else {
 +              ASSERT(primary_pipe->plane_state);
 +              resource_build_scaling_params(primary_pipe);
 +              resource_build_scaling_params(secondary_pipe);
 +      }
 +
 +      return true;
 +}
 +
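+/*
+ * Copy per-stream writeback (DWB) state into the DML pipe array so mode
+ * support can account for writeback bandwidth.  Only writeback_info[0]
+ * is consulted for each stream.
+ */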
 +void dcn20_populate_dml_writeback_from_context(
 +              struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
 +{
 +      int pipe_cnt, i;
 +
+      for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+              struct dc_writeback_info *wb_info;
+
+              if (!res_ctx->pipe_ctx[i].stream)
+                      continue;
+
+              wb_info = &res_ctx->pipe_ctx[i].stream->writeback_info[0];
 +
 +              /* Set writeback information */
+              pipes[pipe_cnt].dout.wb_enable = wb_info->wb_enabled ? 1 : 0;
 +              pipes[pipe_cnt].dout.num_active_wb++;
 +              pipes[pipe_cnt].dout.wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_height;
 +              pipes[pipe_cnt].dout.wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_width;
 +              pipes[pipe_cnt].dout.wb.wb_dst_width = wb_info->dwb_params.dest_width;
 +              pipes[pipe_cnt].dout.wb.wb_dst_height = wb_info->dwb_params.dest_height;
 +              pipes[pipe_cnt].dout.wb.wb_htaps_luma = 1;
 +              pipes[pipe_cnt].dout.wb.wb_vtaps_luma = 1;
 +              pipes[pipe_cnt].dout.wb.wb_htaps_chroma = wb_info->dwb_params.scaler_taps.h_taps_c;
 +              pipes[pipe_cnt].dout.wb.wb_vtaps_chroma = wb_info->dwb_params.scaler_taps.v_taps_c;
 +              pipes[pipe_cnt].dout.wb.wb_hratio = 1.0;
 +              pipes[pipe_cnt].dout.wb.wb_vratio = 1.0;
 +              if (wb_info->dwb_params.out_format == dwb_scaler_mode_yuv420) {
 +                      if (wb_info->dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
 +                              pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_8;
 +                      else
 +                              pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_10;
 +              } else
 +                      pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_444_32;
 +
 +              pipe_cnt++;
 +      }
 +
 +}
 +
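+/*
+ * Flatten the resource context into the DML pipe array.  The first pass
+ * decides whether every active stream can be vblank-synchronized with
+ * the first one; the second fills in timing, output format/bpp, cursor
+ * and scaling parameters per pipe, falling back to conservative
+ * defaults (max cursor, viewport capped at 1920x1080, linear swizzle)
+ * when no plane is attached.  Returns the number of pipes populated.
+ */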
 +int dcn20_populate_dml_pipes_from_context(
 +              struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
 +{
 +      int pipe_cnt, i;
 +      bool synchronized_vblank = true;
 +
 +      for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
 +              if (!res_ctx->pipe_ctx[i].stream)
 +                      continue;
 +
 +              if (pipe_cnt < 0) {
 +                      pipe_cnt = i;
 +                      continue;
 +              }
 +              if (!resource_are_streams_timing_synchronizable(
 +                              res_ctx->pipe_ctx[pipe_cnt].stream,
 +                              res_ctx->pipe_ctx[i].stream)) {
 +                      synchronized_vblank = false;
 +                      break;
 +              }
 +      }
 +
+      for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+              struct dc_crtc_timing *timing;
+              int output_bpc;
+
+              if (!res_ctx->pipe_ctx[i].stream)
+                      continue;
+
+              timing = &res_ctx->pipe_ctx[i].stream->timing;
 +              /* todo:
 +              pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
 +              pipes[pipe_cnt].pipe.src.dcc = 0;
 +              pipes[pipe_cnt].pipe.src.vm = 0;*/
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +              pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
 +              /* todo: rotation?*/
 +              pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
 +#endif
 +              if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {
 +                      pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
 +                      /* 1/2 vblank */
 +                      pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
 +                              (timing->v_total - timing->v_addressable
 +                                      - timing->v_border_top - timing->v_border_bottom) / 2;
 +                      /* 36 bytes dp, 32 hdmi */
 +                      pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
 +                              dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 36 : 32;
 +              }
 +              pipes[pipe_cnt].pipe.src.dcc = false;
 +              pipes[pipe_cnt].pipe.src.dcc_rate = 1;
 +              pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank;
 +              pipes[pipe_cnt].pipe.dest.hblank_start = timing->h_total - timing->h_front_porch;
 +              pipes[pipe_cnt].pipe.dest.hblank_end = pipes[pipe_cnt].pipe.dest.hblank_start
 +                              - timing->h_addressable
 +                              - timing->h_border_left
 +                              - timing->h_border_right;
 +              pipes[pipe_cnt].pipe.dest.vblank_start = timing->v_total - timing->v_front_porch;
 +              pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start
 +                              - timing->v_addressable
 +                              - timing->v_border_top
 +                              - timing->v_border_bottom;
 +              pipes[pipe_cnt].pipe.dest.htotal = timing->h_total;
 +              pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
 +              pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable;
 +              pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable;
 +              pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE;
 +              pipes[pipe_cnt].pipe.dest.pixel_rate_mhz = timing->pix_clk_100hz/10000.0;
 +              if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
 +                      pipes[pipe_cnt].pipe.dest.pixel_rate_mhz *= 2;
 +              pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;
 +              pipes[pipe_cnt].dout.dp_lanes = 4;
 +              pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
 +              pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
 +
 +              switch (res_ctx->pipe_ctx[i].stream->signal) {
 +              case SIGNAL_TYPE_DISPLAY_PORT_MST:
 +              case SIGNAL_TYPE_DISPLAY_PORT:
 +                      pipes[pipe_cnt].dout.output_type = dm_dp;
 +                      break;
 +              case SIGNAL_TYPE_EDP:
 +                      pipes[pipe_cnt].dout.output_type = dm_edp;
 +                      break;
 +              case SIGNAL_TYPE_HDMI_TYPE_A:
 +              case SIGNAL_TYPE_DVI_SINGLE_LINK:
 +              case SIGNAL_TYPE_DVI_DUAL_LINK:
 +                      pipes[pipe_cnt].dout.output_type = dm_hdmi;
 +                      break;
 +              default:
 +                      /* In case there is no signal, set dp with 4 lanes to allow max config */
 +                      pipes[pipe_cnt].dout.output_type = dm_dp;
 +                      pipes[pipe_cnt].dout.dp_lanes = 4;
 +              }
 +
 +              switch (res_ctx->pipe_ctx[i].stream->timing.display_color_depth) {
 +              case COLOR_DEPTH_666:
 +                      output_bpc = 6;
 +                      break;
 +              case COLOR_DEPTH_888:
 +                      output_bpc = 8;
 +                      break;
 +              case COLOR_DEPTH_101010:
 +                      output_bpc = 10;
 +                      break;
 +              case COLOR_DEPTH_121212:
 +                      output_bpc = 12;
 +                      break;
 +              case COLOR_DEPTH_141414:
 +                      output_bpc = 14;
 +                      break;
 +              case COLOR_DEPTH_161616:
 +                      output_bpc = 16;
 +                      break;
 +#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 +              case COLOR_DEPTH_999:
 +                      output_bpc = 9;
 +                      break;
 +              case COLOR_DEPTH_111111:
 +                      output_bpc = 11;
 +                      break;
 +#endif
 +              default:
 +                      output_bpc = 8;
 +                      break;
 +              }
 +
 +
 +              switch (res_ctx->pipe_ctx[i].stream->timing.pixel_encoding) {
 +              case PIXEL_ENCODING_RGB:
 +              case PIXEL_ENCODING_YCBCR444:
 +                      pipes[pipe_cnt].dout.output_format = dm_444;
 +                      pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
 +                      break;
 +              case PIXEL_ENCODING_YCBCR420:
 +                      pipes[pipe_cnt].dout.output_format = dm_420;
 +                      pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3) / 2;
 +                      break;
 +              case PIXEL_ENCODING_YCBCR422:
+                      /* todo: choose between dm_s422 and dm_n422 */
+                      pipes[pipe_cnt].dout.output_format = dm_s422;
 +                      pipes[pipe_cnt].dout.output_bpp = output_bpc * 2;
 +                      break;
 +              default:
 +                      pipes[pipe_cnt].dout.output_format = dm_444;
 +                      pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
 +              }
 +              pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
 +              if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state
 +                              == res_ctx->pipe_ctx[i].plane_state)
 +                      pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].top_pipe->pipe_idx;
 +
+              /* todo: default max for now, until there is logic reflecting this in dc */
 +              pipes[pipe_cnt].dout.output_bpc = 12;
 +              /*
 +               * Use max cursor settings for calculations to minimize
 +               * bw calculations due to cursor on/off
 +               */
 +              pipes[pipe_cnt].pipe.src.num_cursors = 2;
 +              pipes[pipe_cnt].pipe.src.cur0_src_width = 256;
 +              pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit;
 +              pipes[pipe_cnt].pipe.src.cur1_src_width = 256;
 +              pipes[pipe_cnt].pipe.src.cur1_bpp = dm_cur_32bit;
 +
 +              if (!res_ctx->pipe_ctx[i].plane_state) {
 +                      pipes[pipe_cnt].pipe.src.source_scan = dm_horz;
 +                      pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear;
 +                      pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile;
 +                      pipes[pipe_cnt].pipe.src.viewport_width = timing->h_addressable;
 +                      if (pipes[pipe_cnt].pipe.src.viewport_width > 1920)
 +                              pipes[pipe_cnt].pipe.src.viewport_width = 1920;
 +                      pipes[pipe_cnt].pipe.src.viewport_height = timing->v_addressable;
 +                      if (pipes[pipe_cnt].pipe.src.viewport_height > 1080)
 +                              pipes[pipe_cnt].pipe.src.viewport_height = 1080;
 +                      pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */
 +                      pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
 +                      pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/
 +                      pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/
 +                      pipes[pipe_cnt].pipe.dest.full_recout_width = pipes[pipe_cnt].pipe.dest.recout_width;  /*when is_hsplit != 1*/
 +                      pipes[pipe_cnt].pipe.dest.full_recout_height = pipes[pipe_cnt].pipe.dest.recout_height; /*when is_hsplit != 1*/
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = 1.0;
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = 1.0;
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = 0; /*Lb only or Full scl*/
 +                      pipes[pipe_cnt].pipe.scale_taps.htaps = 1;
 +                      pipes[pipe_cnt].pipe.scale_taps.vtaps = 1;
 +                      pipes[pipe_cnt].pipe.src.is_hsplit = 0;
 +                      pipes[pipe_cnt].pipe.dest.odm_combine = 0;
 +                      pipes[pipe_cnt].pipe.dest.vtotal_min = timing->v_total;
 +                      pipes[pipe_cnt].pipe.dest.vtotal_max = timing->v_total;
 +              } else {
 +                      struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state;
 +                      struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data;
 +
 +                      pipes[pipe_cnt].pipe.src.immediate_flip = pln->flip_immediate;
 +                      pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe
 +                                      && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
 +                                      || (res_ctx->pipe_ctx[i].top_pipe
 +                                      && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln);
 +                      pipes[pipe_cnt].pipe.dest.odm_combine = (res_ctx->pipe_ctx[i].bottom_pipe
 +                                      && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln
 +                                      && res_ctx->pipe_ctx[i].bottom_pipe->stream_res.opp
 +                                              != res_ctx->pipe_ctx[i].stream_res.opp)
 +                              || (res_ctx->pipe_ctx[i].top_pipe
 +                                      && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln
 +                                      && res_ctx->pipe_ctx[i].top_pipe->stream_res.opp
 +                                              != res_ctx->pipe_ctx[i].stream_res.opp);
 +                      pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
 +                                      || pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
 +                      pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
 +                      pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y;
 +                      pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width;
 +                      pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
 +                      pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
 +                      pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
 +                      if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
 +                              pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.video.luma_pitch;
 +                              pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.video.chroma_pitch;
 +                              pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.video.meta_pitch_l;
 +                              pipes[pipe_cnt].pipe.src.meta_pitch_c = pln->dcc.video.meta_pitch_c;
 +                      } else {
 +                              pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.grph.surface_pitch;
 +                              pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.grph.meta_pitch;
 +                      }
 +                      pipes[pipe_cnt].pipe.src.dcc = pln->dcc.enable;
 +                      pipes[pipe_cnt].pipe.dest.recout_width = scl->recout.width;
 +                      pipes[pipe_cnt].pipe.dest.recout_height = scl->recout.height;
 +                      pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width;
 +                      pipes[pipe_cnt].pipe.dest.full_recout_height = scl->recout.height;
 +                      if (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) {
 +                              pipes[pipe_cnt].pipe.dest.full_recout_width +=
 +                                              res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.width;
 +                              pipes[pipe_cnt].pipe.dest.full_recout_height +=
 +                                              res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.height;
 +                      } else if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln) {
 +                              pipes[pipe_cnt].pipe.dest.full_recout_width +=
 +                                              res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.width;
 +                              pipes[pipe_cnt].pipe.dest.full_recout_height +=
 +                                              res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.height;
 +                      }
 +
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32);
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32);
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32);
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32);
 +                      pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable =
 +                                      scl->ratios.vert.value != dc_fixpt_one.value
 +                                      || scl->ratios.horz.value != dc_fixpt_one.value
 +                                      || scl->ratios.vert_c.value != dc_fixpt_one.value
 +                                      || scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/
 +                                      || dc->debug.always_scale; /*support always scale*/
 +                      pipes[pipe_cnt].pipe.scale_taps.htaps = scl->taps.h_taps;
 +                      pipes[pipe_cnt].pipe.scale_taps.htaps_c = scl->taps.h_taps_c;
 +                      pipes[pipe_cnt].pipe.scale_taps.vtaps = scl->taps.v_taps;
 +                      pipes[pipe_cnt].pipe.scale_taps.vtaps_c = scl->taps.v_taps_c;
 +
 +                      pipes[pipe_cnt].pipe.src.macro_tile_size =
 +                                      swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle);
 +                      swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle,
 +                                      &pipes[pipe_cnt].pipe.src.sw_mode);
 +
 +                      switch (pln->format) {
 +                      case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
 +                      case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
 +                              pipes[pipe_cnt].pipe.src.source_format = dm_420_8;
 +                              break;
 +                      case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
 +                      case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
 +                              pipes[pipe_cnt].pipe.src.source_format = dm_420_10;
 +                              break;
 +                      case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
 +                      case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
 +                      case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
 +                              pipes[pipe_cnt].pipe.src.source_format = dm_444_64;
 +                              break;
 +                      case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
 +                      case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
 +                              pipes[pipe_cnt].pipe.src.source_format = dm_444_16;
 +                              break;
 +                      case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
 +                              pipes[pipe_cnt].pipe.src.source_format = dm_444_8;
 +                              break;
 +                      default:
 +                              pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
 +                              break;
 +                      }
 +              }
 +
 +              pipe_cnt++;
 +      }
 +
 +      /* populate writeback information */
 +      dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes);
 +
 +      return pipe_cnt;
 +}
 +
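+/*
+ * Line-time budget for the writeback path: MCIF buffer capacity in
+ * time units minus the urgent watermark, with a 4-bit fraction carried
+ * throughout.  Worked example with assumed figures: PACKED_444 and
+ * time_per_pixel = 8 give time_per_byte = 2 and max_free_entry =
+ * 0x200 + 0x140 = 0x340, so buf_lh_capability = 0x340 * 2 * 32 / 16 =
+ * 3328 before the watermark is subtracted.
+ */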
 +unsigned int dcn20_calc_max_scaled_time(
 +              unsigned int time_per_pixel,
 +              enum mmhubbub_wbif_mode mode,
 +              unsigned int urgent_watermark)
 +{
 +      unsigned int time_per_byte = 0;
 +      unsigned int total_y_free_entry = 0x200; /* two memory piece for luma */
 +      unsigned int total_c_free_entry = 0x140; /* two memory piece for chroma */
 +      unsigned int small_free_entry, max_free_entry;
 +      unsigned int buf_lh_capability;
 +      unsigned int max_scaled_time;
 +
 +      if (mode == PACKED_444) /* packed mode */
 +              time_per_byte = time_per_pixel/4;
 +      else if (mode == PLANAR_420_8BPC)
 +              time_per_byte  = time_per_pixel;
 +      else if (mode == PLANAR_420_10BPC) /* p010 */
 +              time_per_byte  = time_per_pixel * 819/1024;
 +
 +      if (time_per_byte == 0)
 +              time_per_byte = 1;
 +
 +      small_free_entry  = (total_y_free_entry > total_c_free_entry) ? total_c_free_entry : total_y_free_entry;
 +      max_free_entry    = (mode == PACKED_444) ? total_y_free_entry + total_c_free_entry : small_free_entry;
 +      buf_lh_capability = max_free_entry*time_per_byte*32/16; /* there is 4bit fraction */
 +      max_scaled_time   = buf_lh_capability - urgent_watermark;
 +      return max_scaled_time;
 +}
 +
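+/*
+ * Program MCIF_WB arbitration for every enabled writeback pipe.
+ * Watermarks come from DML; max_scaled_time is derived via
+ * dcn20_calc_max_scaled_time() from watermark set 0, on the code's own
+ * assumption that all four sets carry the same value.
+ */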
 +void dcn20_set_mcif_arb_params(
 +              struct dc *dc,
 +              struct dc_state *context,
 +              display_e2e_pipe_params_st *pipes,
 +              int pipe_cnt)
 +{
 +      enum mmhubbub_wbif_mode wbif_mode;
 +      struct mcif_arb_params *wb_arb_params;
 +      int i, j, k, dwb_pipe;
 +
 +      /* Writeback MCIF_WB arbitration parameters */
 +      dwb_pipe = 0;
 +      for (i = 0; i < dc->res_pool->pipe_count; i++) {
 +
 +              if (!context->res_ctx.pipe_ctx[i].stream)
 +                      continue;
 +
 +              for (j = 0; j < MAX_DWB_PIPES; j++) {
+                      if (!context->res_ctx.pipe_ctx[i].stream->writeback_info[j].wb_enabled)
+                              continue;
 +
 +                      //wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params;
 +                      wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe];
 +
 +                      if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.out_format == dwb_scaler_mode_yuv420) {
 +                              if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
 +                                      wbif_mode = PLANAR_420_8BPC;
 +                              else
 +                                      wbif_mode = PLANAR_420_10BPC;
 +                      } else
 +                              wbif_mode = PACKED_444;
 +
 +                      for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) {
 +                              wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +                              wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +                      }
 +                      wb_arb_params->time_per_pixel = 16.0 / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* 4 bit fraction, ms */
 +                      wb_arb_params->slice_lines = 32;
 +                      wb_arb_params->arbitration_slice = 2;
 +                      wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel,
 +                              wbif_mode,
 +                              wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */
 +
 +                      dwb_pipe++;
 +
 +                      if (dwb_pipe >= MAX_DWB_PIPES)
 +                              return;
 +              }
 +              if (dwb_pipe >= MAX_DWB_PIPES)
 +                      return;
 +      }
 +}
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
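+/*
+ * Validate each DSC-enabled stream's picture size, pixel encoding and
+ * color depth against the DSC it was assigned; under ODM combine each
+ * DSC only sees half the picture width.
+ */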
 +static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
 +{
 +      int i;
 +
 +      /* Validate DSC config, dsc count validation is already done */
 +      for (i = 0; i < dc->res_pool->pipe_count; i++) {
 +              struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
 +              struct dc_stream_state *stream = pipe_ctx->stream;
 +              struct dsc_config dsc_cfg;
 +
 +              /* Only need to validate top pipe */
 +              if (pipe_ctx->top_pipe || !stream || !stream->timing.flags.DSC)
 +                      continue;
 +
 +              dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left
 +                              + stream->timing.h_border_right;
 +              dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top
 +                              + stream->timing.v_border_bottom;
 +              if (dc_res_get_odm_bottom_pipe(pipe_ctx))
 +                      dsc_cfg.pic_width /= 2;
 +              dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
 +              dsc_cfg.color_depth = stream->timing.display_color_depth;
 +              dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
 +
 +              if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
 +                      return false;
 +      }
 +      return true;
 +}
 +#endif
 +
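+/*
+ * Full DCN2 mode validation: merge previously split pipes so mode
+ * support can re-decide, populate DML, find a working voltage level
+ * (retrying with ODM combine if the non-ODM pass fails), apply the
+ * pipe-split policy, then derive watermark sets B/C/D at progressively
+ * higher voltage floors and set A at the chosen level.  fast_validate
+ * returns before the watermark pass.
+ */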
 +bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
 +              bool fast_validate)
 +{
 +      bool out = false;
 +
 +      BW_VAL_TRACE_SETUP();
 +
 +      int pipe_cnt, i, pipe_idx, vlevel, vlevel_unsplit;
 +      int pipe_split_from[MAX_PIPES];
 +      bool odm_capable = context->bw_ctx.dml.ip.odm_capable;
 +      bool force_split = false;
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      bool failed_non_odm_dsc = false;
 +#endif
 +      int split_threshold = dc->res_pool->pipe_count / 2;
 +      bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
+      display_e2e_pipe_params_st *pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
 +      DC_LOGGER_INIT(dc->ctx->logger);
 +
 +      BW_VAL_TRACE_COUNT();
 +
 +      ASSERT(pipes);
 +      if (!pipes)
 +              return false;
 +
 +      for (i = 0; i < dc->res_pool->pipe_count; i++) {
 +              struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 +              struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
 +
 +              if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state)
 +                      continue;
 +
 +              /* merge previously split pipe since mode support needs to make the decision */
 +              pipe->bottom_pipe = hsplit_pipe->bottom_pipe;
 +              if (hsplit_pipe->bottom_pipe)
 +                      hsplit_pipe->bottom_pipe->top_pipe = pipe;
 +              hsplit_pipe->plane_state = NULL;
 +              hsplit_pipe->stream = NULL;
 +              hsplit_pipe->top_pipe = NULL;
 +              hsplit_pipe->bottom_pipe = NULL;
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +              if (hsplit_pipe->stream_res.dsc && hsplit_pipe->stream_res.dsc != pipe->stream_res.dsc)
 +                      release_dsc(&context->res_ctx, dc->res_pool, &hsplit_pipe->stream_res.dsc);
 +#endif
 +              /* Clear plane_res and stream_res */
 +              memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res));
 +              memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res));
 +              if (pipe->plane_state)
 +                      resource_build_scaling_params(pipe);
 +      }
 +
 +      if (dc->res_pool->funcs->populate_dml_pipes)
 +              pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
 +                      &context->res_ctx, pipes);
 +      else
 +              pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
 +                      &context->res_ctx, pipes);
 +
 +      if (!pipe_cnt) {
 +              BW_VAL_TRACE_SKIP(pass);
 +              out = true;
 +              goto validate_out;
 +      }
 +
 +      context->bw_ctx.dml.ip.odm_capable = 0;
 +
 +      vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
 +
 +      context->bw_ctx.dml.ip.odm_capable = odm_capable;
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      /* 1 dsc per stream dsc validation */
 +      if (vlevel <= context->bw_ctx.dml.soc.num_states)
 +              if (!dcn20_validate_dsc(dc, context)) {
 +                      failed_non_odm_dsc = true;
 +                      vlevel = context->bw_ctx.dml.soc.num_states + 1;
 +              }
 +#endif
 +
 +      if (vlevel > context->bw_ctx.dml.soc.num_states && odm_capable)
 +              vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
 +
 +      if (vlevel > context->bw_ctx.dml.soc.num_states)
 +              goto validate_fail;
 +
 +      if ((context->stream_count > split_threshold && dc->current_state->stream_count <= split_threshold)
 +              || (context->stream_count <= split_threshold && dc->current_state->stream_count > split_threshold))
 +              context->commit_hints.full_update_needed = true;
 +
+      /* initialize pipe_split_from to invalid idx */
 +      for (i = 0; i < MAX_PIPES; i++)
 +              pipe_split_from[i] = -1;
 +
+      /* Conditionals that apply only to single-display configs are set here */
 +      for (i = 0; i < dc->res_pool->pipe_count; i++) {
 +              struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 +              bool exit_loop = false;
 +
 +              if (!pipe->stream || pipe->top_pipe)
 +                      continue;
 +
 +              if (dc->debug.force_single_disp_pipe_split) {
 +                      if (!force_split)
 +                              force_split = true;
 +                      else {
 +                              force_split = false;
 +                              exit_loop = true;
 +                      }
 +              }
 +              if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) {
 +                      if (avoid_split)
 +                              avoid_split = false;
 +                      else {
 +                              avoid_split = true;
 +                              exit_loop = true;
 +                      }
 +              }
 +              if (exit_loop)
 +                      break;
 +      }
 +
 +      if (context->stream_count > split_threshold)
 +              avoid_split = true;
 +
 +      vlevel_unsplit = vlevel;
 +      for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
 +              if (!context->res_ctx.pipe_ctx[i].stream)
 +                      continue;
 +              for (; vlevel_unsplit <= context->bw_ctx.dml.soc.num_states; vlevel_unsplit++)
 +                      if (context->bw_ctx.dml.vba.NoOfDPP[vlevel_unsplit][0][pipe_idx] == 1)
 +                              break;
 +              pipe_idx++;
 +      }
 +
 +      for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
 +              struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 +              struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
 +              bool need_split = true;
 +              bool need_split3d;
 +
 +              if (!pipe->stream || pipe_split_from[i] >= 0)
 +                      continue;
 +
 +              pipe_idx++;
 +
 +              if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
 +                      force_split = true;
 +                      context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = true;
 +                      context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true;
 +              }
 +              if (force_split && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
 +                      context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
+              if (dc->config.forced_clocks) {
 +                      context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] =
 +                                      context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
 +              }
 +              if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
 +                      hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
 +                      ASSERT(hsplit_pipe);
 +                      if (!dcn20_split_stream_for_combine(
 +                                      &context->res_ctx, dc->res_pool,
 +                                      pipe, hsplit_pipe,
 +                                      true))
 +                              goto validate_fail;
 +                      pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
 +                      dcn20_build_mapped_resource(dc, context, pipe->stream);
 +              }
 +
 +              if (!pipe->plane_state)
 +                      continue;
 +              /* Skip 2nd half of already split pipe */
 +              if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)
 +                      continue;
 +
 +              need_split3d = ((pipe->stream->view_format ==
 +                              VIEW_3D_FORMAT_SIDE_BY_SIDE ||
 +                              pipe->stream->view_format ==
 +                              VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
 +                              (pipe->stream->timing.timing_3d_format ==
 +                              TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
 +                               pipe->stream->timing.timing_3d_format ==
 +                              TIMING_3D_FORMAT_SIDE_BY_SIDE));
 +
 +              if (avoid_split && vlevel_unsplit <= context->bw_ctx.dml.soc.num_states && !force_split && !need_split3d) {
 +                      need_split = false;
 +                      vlevel = vlevel_unsplit;
 +                      context->bw_ctx.dml.vba.maxMpcComb = 0;
 +              } else
 +                      need_split = context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 2;
 +
 +              /* We do not support mpo + odm at the moment */
 +              if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state
 +                              && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
 +                      goto validate_fail;
 +
 +              if (need_split3d || need_split || force_split) {
 +                      if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
 +                              /* pipe not split previously needs split */
 +                              hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
 +                              ASSERT(hsplit_pipe || force_split);
 +                              if (!hsplit_pipe)
 +                                      continue;
 +
 +                              if (!dcn20_split_stream_for_combine(
 +                                              &context->res_ctx, dc->res_pool,
 +                                              pipe, hsplit_pipe,
 +                                              context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]))
 +                                      goto validate_fail;
 +                              pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
 +                      }
 +              } else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
 +                      /* merge should already have been done */
 +                      ASSERT(0);
 +              }
 +      }
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      /* Actual dsc count per stream dsc validation*/
 +      if (failed_non_odm_dsc && !dcn20_validate_dsc(dc, context)) {
 +              context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
 +                              DML_FAIL_DSC_VALIDATION_FAILURE;
 +              goto validate_fail;
 +      }
 +#endif
 +
 +      BW_VAL_TRACE_END_VOLTAGE_LEVEL();
 +
 +      if (fast_validate) {
 +              BW_VAL_TRACE_SKIP(fast);
 +              out = true;
 +              goto validate_out;
 +      }
 +
 +      for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
 +              if (!context->res_ctx.pipe_ctx[i].stream)
 +                      continue;
 +
 +              pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
 +              pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.vba.RequiredDISPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
 +
 +              if (pipe_split_from[i] < 0) {
 +                      pipes[pipe_cnt].clks_cfg.dppclk_mhz =
 +                                      context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx];
 +                      if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx)
 +                              pipes[pipe_cnt].pipe.dest.odm_combine =
 +                                              context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
 +                      else
 +                              pipes[pipe_cnt].pipe.dest.odm_combine = 0;
 +                      pipe_idx++;
 +              } else {
 +                      pipes[pipe_cnt].clks_cfg.dppclk_mhz =
 +                                      context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]];
 +                      if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i])
 +                              pipes[pipe_cnt].pipe.dest.odm_combine =
 +                                              context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_split_from[i]];
 +                      else
 +                              pipes[pipe_cnt].pipe.dest.odm_combine = 0;
 +              }
 +              if (dc->config.forced_clocks) {
 +                      pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
 +                      pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
 +              }
 +              pipe_cnt++;
 +      }
 +
 +      if (pipe_cnt != pipe_idx) {
 +              if (dc->res_pool->funcs->populate_dml_pipes)
 +                      pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
 +                              &context->res_ctx, pipes);
 +              else
 +                      pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
 +                              &context->res_ctx, pipes);
 +      }
 +
 +      pipes[0].clks_cfg.voltage = vlevel;
 +      pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
 +      pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
 +
 +      /* only pipe 0 is read for voltage and dcf/soc clocks */
 +      if (vlevel < 1) {
 +              pipes[0].clks_cfg.voltage = 1;
 +              pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].dcfclk_mhz;
 +              pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].socclk_mhz;
 +      }
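 +      /* Watermark set B: clocks forced to at least voltage level 1; get_wm_*() returns us, stored as ns */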
 +      context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +
 +      if (vlevel < 2) {
 +              pipes[0].clks_cfg.voltage = 2;
 +              pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz;
 +              pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz;
 +      }
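 +      /* Watermark set C: clocks forced to at least voltage level 2 */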
 +      context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +
 +      if (vlevel < 3) {
 +              pipes[0].clks_cfg.voltage = 3;
 +              pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[3].dcfclk_mhz;
 +              pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[3].socclk_mhz;
 +      }
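 +      /* Watermark set D: clocks forced to at least voltage level 3 */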
 +      context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +
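 +      /* Watermark set A: restore the validated voltage level and recompute with its clocks */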
 +      pipes[0].clks_cfg.voltage = vlevel;
 +      pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
 +      pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
 +      context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 +      /* Writeback MCIF_WB arbitration parameters */
 +      dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
 +
 +      context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;
 +      context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
 +      context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
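 +      /* DRAMSpeed is in MT/s; *1000/16 inverts the uclk-to-MT/s conversion used in update_bounding_box() */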
 +      context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
 +      context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
 +      context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
 +      context->bw_ctx.bw.dcn.clk.p_state_change_support =
 +              context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
 +                                                      != dm_dram_clock_change_unsupported;
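 +      /* start at 0; raised below to the max DPPCLK across active pipes */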
 +      context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
 +
 +      BW_VAL_TRACE_END_WATERMARKS();
 +
 +      for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
 +              if (!context->res_ctx.pipe_ctx[i].stream)
 +                      continue;
 +              pipes[pipe_idx].pipe.dest.vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx];
 +              pipes[pipe_idx].pipe.dest.vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx];
 +              pipes[pipe_idx].pipe.dest.vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx];
 +              pipes[pipe_idx].pipe.dest.vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx];
 +              if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
 +                      context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
 +              context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
 +                                              pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +              context->res_ctx.pipe_ctx[i].stream_res.dscclk_khz =
 +                              context->bw_ctx.dml.vba.DSCCLK_calculated[pipe_idx] * 1000;
 +#endif
 +              context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
 +              pipe_idx++;
 +      }
 +
 +      for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
 +              bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2;
 +
 +              if (!context->res_ctx.pipe_ctx[i].stream)
 +                      continue;
 +
 +              context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml,
 +                              &context->res_ctx.pipe_ctx[i].dlg_regs,
 +                              &context->res_ctx.pipe_ctx[i].ttu_regs,
 +                              pipes,
 +                              pipe_cnt,
 +                              pipe_idx,
 +                              cstate_en,
 +                              context->bw_ctx.bw.dcn.clk.p_state_change_support,
 +                              false, false, false);
 +
 +              context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
 +                              &context->res_ctx.pipe_ctx[i].rq_regs,
 +                              pipes[pipe_idx].pipe);
 +              pipe_idx++;
 +      }
 +
 +      out = true;
 +      goto validate_out;
 +
 +validate_fail:
 +      DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
 +              dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
 +
 +      BW_VAL_TRACE_SKIP(fail);
 +      out = false;
 +
 +validate_out:
 +      kfree(pipes);
 +
 +      BW_VAL_TRACE_FINISH();
 +
 +      return out;
 +}
 +
 +struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
 +              struct dc_state *state,
 +              const struct resource_pool *pool,
 +              struct dc_stream_state *stream)
 +{
 +      struct resource_context *res_ctx = &state->res_ctx;
 +      struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
 +      struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);
 +
 +      if (!head_pipe)
 +              ASSERT(0);
 +
 +      if (!idle_pipe)
 +              return NULL;
 +
 +      idle_pipe->stream = head_pipe->stream;
 +      idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
 +      idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
 +
 +      idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
 +      idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
 +      idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
 +      idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
 +
 +      return idle_pipe;
 +}
 +
 +bool dcn20_get_dcc_compression_cap(const struct dc *dc,
 +              const struct dc_dcc_surface_param *input,
 +              struct dc_surface_dcc_cap *output)
 +{
 +      return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
 +                      dc->res_pool->hubbub,
 +                      input,
 +                      output);
 +}
 +
 +static void dcn20_destroy_resource_pool(struct resource_pool **pool)
 +{
 +      struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool);
 +
 +      destruct(dcn20_pool);
 +      kfree(dcn20_pool);
 +      *pool = NULL;
 +}
 +
 +
 +static struct dc_cap_funcs cap_funcs = {
 +      .get_dcc_compression_cap = dcn20_get_dcc_compression_cap
 +};
 +
 +
 +enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state)
 +{
 +      enum dc_status result = DC_OK;
 +
 +      enum surface_pixel_format surf_pix_format = plane_state->format;
 +      unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
 +
 +      enum swizzle_mode_values swizzle = DC_SW_LINEAR;
 +
 +      if (bpp == 64)
 +              swizzle = DC_SW_64KB_D;
 +      else
 +              swizzle = DC_SW_64KB_S;
 +
 +      plane_state->tiling_info.gfx9.swizzle = swizzle;
 +      return result;
 +}
 +
 +static struct resource_funcs dcn20_res_pool_funcs = {
 +      .destroy = dcn20_destroy_resource_pool,
 +      .link_enc_create = dcn20_link_encoder_create,
 +      .validate_bandwidth = dcn20_validate_bandwidth,
 +      .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
 +      .add_stream_to_ctx = dcn20_add_stream_to_ctx,
 +      .remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
 +      .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
 +      .get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
 +      .set_mcif_arb_params = dcn20_set_mcif_arb_params,
 +      .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
 +};
 +
 +bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
 +{
 +      int i;
 +      uint32_t pipe_count = pool->res_cap->num_dwb;
 +
 +      ASSERT(pipe_count > 0);
 +
 +      for (i = 0; i < pipe_count; i++) {
 +              struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc),
 +                                                  GFP_KERNEL);
 +
 +              if (!dwbc20) {
 +                      dm_error("DC: failed to create dwbc20!\n");
 +                      return false;
 +              }
 +              dcn20_dwbc_construct(dwbc20, ctx,
 +                              &dwbc20_regs[i],
 +                              &dwbc20_shift,
 +                              &dwbc20_mask,
 +                              i);
 +              pool->dwbc[i] = &dwbc20->base;
 +      }
 +      return true;
 +}
 +
 +bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
 +{
 +      int i;
 +      uint32_t pipe_count = pool->res_cap->num_dwb;
 +
 +      ASSERT(pipe_count > 0);
 +
 +      for (i = 0; i < pipe_count; i++) {
 +              struct dcn20_mmhubbub *mcif_wb20 = kzalloc(sizeof(struct dcn20_mmhubbub),
 +                                                  GFP_KERNEL);
 +
 +              if (!mcif_wb20) {
 +                      dm_error("DC: failed to create mcif_wb20!\n");
 +                      return false;
 +              }
 +
 +              dcn20_mmhubbub_construct(mcif_wb20, ctx,
 +                              &mcif_wb20_regs[i],
 +                              &mcif_wb20_shift,
 +                              &mcif_wb20_mask,
 +                              i);
 +
 +              pool->mcif_wb[i] = &mcif_wb20->base;
 +      }
 +      return true;
 +}
 +
 +struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
 +{
 +      struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
 +
 +      if (!pp_smu)
 +              return pp_smu;
 +
 +      dm_pp_get_funcs(ctx, pp_smu);
 +
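 +      /* zero the funcs unless the interface reports the Navi (NV) pp_smu version */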
 +      if (pp_smu->ctx.ver != PP_SMU_VER_NV)
 +              pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs));
 +
 +      return pp_smu;
 +}
 +
 +void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
 +{
 +      if (pp_smu && *pp_smu) {
 +              kfree(*pp_smu);
 +              *pp_smu = NULL;
 +      }
 +}
 +
 +static void cap_soc_clocks(
 +              struct _vcs_dpi_soc_bounding_box_st *bb,
 +              struct pp_smu_nv_clock_table max_clocks)
 +{
 +      int i;
 +
 +      // First pass - cap all clocks higher than the reported max
 +      for (i = 0; i < bb->num_states; i++) {
 +              if ((bb->clock_limits[i].dcfclk_mhz > (max_clocks.dcfClockInKhz / 1000))
 +                              && max_clocks.dcfClockInKhz != 0)
 +                      bb->clock_limits[i].dcfclk_mhz = (max_clocks.dcfClockInKhz / 1000);
 +
 +              if ((bb->clock_limits[i].dram_speed_mts > (max_clocks.uClockInKhz / 1000) * 16)
 +                                              && max_clocks.uClockInKhz != 0)
 +                      bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16;
 +
 +              if ((bb->clock_limits[i].fabricclk_mhz > (max_clocks.fabricClockInKhz / 1000))
 +                                              && max_clocks.fabricClockInKhz != 0)
 +                      bb->clock_limits[i].fabricclk_mhz = (max_clocks.fabricClockInKhz / 1000);
 +
 +              if ((bb->clock_limits[i].dispclk_mhz > (max_clocks.displayClockInKhz / 1000))
 +                                              && max_clocks.displayClockInKhz != 0)
 +                      bb->clock_limits[i].dispclk_mhz = (max_clocks.displayClockInKhz / 1000);
 +
 +              if ((bb->clock_limits[i].dppclk_mhz > (max_clocks.dppClockInKhz / 1000))
 +                                              && max_clocks.dppClockInKhz != 0)
 +                      bb->clock_limits[i].dppclk_mhz = (max_clocks.dppClockInKhz / 1000);
 +
 +              if ((bb->clock_limits[i].phyclk_mhz > (max_clocks.phyClockInKhz / 1000))
 +                                              && max_clocks.phyClockInKhz != 0)
 +                      bb->clock_limits[i].phyclk_mhz = (max_clocks.phyClockInKhz / 1000);
 +
 +              if ((bb->clock_limits[i].socclk_mhz > (max_clocks.socClockInKhz / 1000))
 +                                              && max_clocks.socClockInKhz != 0)
 +                      bb->clock_limits[i].socclk_mhz = (max_clocks.socClockInKhz / 1000);
 +
 +              if ((bb->clock_limits[i].dscclk_mhz > (max_clocks.dscClockInKhz / 1000))
 +                                              && max_clocks.dscClockInKhz != 0)
 +                      bb->clock_limits[i].dscclk_mhz = (max_clocks.dscClockInKhz / 1000);
 +      }
 +
 +      // Second pass - remove all duplicate clock states
 +      for (i = bb->num_states - 1; i > 1; i--) {
 +              bool duplicate = true;
 +
 +              if (bb->clock_limits[i-1].dcfclk_mhz != bb->clock_limits[i].dcfclk_mhz)
 +                      duplicate = false;
 +              if (bb->clock_limits[i-1].dispclk_mhz != bb->clock_limits[i].dispclk_mhz)
 +                      duplicate = false;
 +              if (bb->clock_limits[i-1].dppclk_mhz != bb->clock_limits[i].dppclk_mhz)
 +                      duplicate = false;
 +              if (bb->clock_limits[i-1].dram_speed_mts != bb->clock_limits[i].dram_speed_mts)
 +                      duplicate = false;
 +              if (bb->clock_limits[i-1].dscclk_mhz != bb->clock_limits[i].dscclk_mhz)
 +                      duplicate = false;
 +              if (bb->clock_limits[i-1].fabricclk_mhz != bb->clock_limits[i].fabricclk_mhz)
 +                      duplicate = false;
 +              if (bb->clock_limits[i-1].phyclk_mhz != bb->clock_limits[i].phyclk_mhz)
 +                      duplicate = false;
 +              if (bb->clock_limits[i-1].socclk_mhz != bb->clock_limits[i].socclk_mhz)
 +                      duplicate = false;
 +
 +              if (duplicate)
 +                      bb->num_states--;
 +      }
 +}
 +
 +static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
 +              struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states)
 +{
 +      struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES] = {0};
 +      int i;
 +      int num_calculated_states = 0;
 +      int min_dcfclk = 0;
 +
 +      if (num_states == 0)
 +              return;
 +
 +      if (dc->bb_overrides.min_dcfclk_mhz > 0)
 +              min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
 +
 +      for (i = 0; i < num_states; i++) {
 +              int min_fclk_required_by_uclk;
 +              calculated_states[i].state = i;
 +              calculated_states[i].dram_speed_mts = uclk_states[i] * 16 / 1000;
 +
 +              // FCLK:UCLK ratio is 1.08; uclk_states[] is in kHz, so 1080/1000000 scales to MHz
 +              min_fclk_required_by_uclk = ((unsigned long long)uclk_states[i]) * 1080 / 1000000;
 +
 +              calculated_states[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ?
 +                              min_dcfclk : min_fclk_required_by_uclk;
 +
 +              calculated_states[i].socclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->socClockInKhz / 1000) ?
 +                              max_clocks->socClockInKhz / 1000 : calculated_states[i].fabricclk_mhz;
 +
 +              calculated_states[i].dcfclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->dcfClockInKhz / 1000) ?
 +                              max_clocks->dcfClockInKhz / 1000 : calculated_states[i].fabricclk_mhz;
 +
 +              calculated_states[i].dispclk_mhz = max_clocks->displayClockInKhz / 1000;
 +              calculated_states[i].dppclk_mhz = max_clocks->displayClockInKhz / 1000;
 +              calculated_states[i].dscclk_mhz = max_clocks->displayClockInKhz / (1000 * 3);
 +
 +              calculated_states[i].phyclk_mhz = max_clocks->phyClockInKhz / 1000;
 +
 +              num_calculated_states++;
 +      }
 +
 +      memcpy(bb->clock_limits, calculated_states, sizeof(bb->clock_limits));
 +      bb->num_states = num_calculated_states;
 +
 +      // Duplicate the last state: DML always needs one extra state, identical to the max state, to work
 +      memcpy(&bb->clock_limits[num_calculated_states], &bb->clock_limits[num_calculated_states - 1], sizeof(struct _vcs_dpi_voltage_scaling_st));
 +      bb->clock_limits[num_calculated_states].state = bb->num_states;
 +}
 +
 +static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
 +{
 +      kernel_fpu_begin();
 +      if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
 +                      && dc->bb_overrides.sr_exit_time_ns) {
 +              bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
 +      }
 +
 +      if ((int)(bb->sr_enter_plus_exit_time_us * 1000)
 +                              != dc->bb_overrides.sr_enter_plus_exit_time_ns
 +                      && dc->bb_overrides.sr_enter_plus_exit_time_ns) {
 +              bb->sr_enter_plus_exit_time_us =
 +                              dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
 +      }
 +
 +      if ((int)(bb->urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
 +                      && dc->bb_overrides.urgent_latency_ns) {
 +              bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
 +      }
 +
 +      if ((int)(bb->dram_clock_change_latency_us * 1000)
 +                              != dc->bb_overrides.dram_clock_change_latency_ns
 +                      && dc->bb_overrides.dram_clock_change_latency_ns) {
 +              bb->dram_clock_change_latency_us =
 +                              dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
 +      }
 +      kernel_fpu_end();
 +}
 +
 +#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
 +#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
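 +/* e.g. fixed16_to_double(0x18000) == 1.5: bounding box values are 16.16 fixed point */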
 +
 +static bool init_soc_bounding_box(struct dc *dc,
 +                                struct dcn20_resource_pool *pool)
 +{
 +      const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
 +      DC_LOGGER_INIT(dc->ctx->logger);
 +
 +      if (!bb && !SOC_BOUNDING_BOX_VALID) {
 +              DC_LOG_ERROR("%s: invalid soc bounding box\n", __func__);
 +              return false;
 +      }
 +
 +      if (bb && !SOC_BOUNDING_BOX_VALID) {
 +              int i;
 +
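 +              /* firmware-provided box: convert little-endian 16.16 fixed-point fields to host doubles */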
 +              dcn2_0_soc.sr_exit_time_us =
 +                              fixed16_to_double_to_cpu(bb->sr_exit_time_us);
 +              dcn2_0_soc.sr_enter_plus_exit_time_us =
 +                              fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
 +              dcn2_0_soc.urgent_latency_us =
 +                              fixed16_to_double_to_cpu(bb->urgent_latency_us);
 +              dcn2_0_soc.urgent_latency_pixel_data_only_us =
 +                              fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
 +              dcn2_0_soc.urgent_latency_pixel_mixed_with_vm_data_us =
 +                              fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
 +              dcn2_0_soc.urgent_latency_vm_data_only_us =
 +                              fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
 +              dcn2_0_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
 +                              le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
 +              dcn2_0_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
 +                              le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
 +              dcn2_0_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
 +                              le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
 +              dcn2_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
 +                              fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
 +              dcn2_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
 +                              fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
 +              dcn2_0_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
 +                              fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
 +              dcn2_0_soc.max_avg_sdp_bw_use_normal_percent =
 +                              fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
 +              dcn2_0_soc.max_avg_dram_bw_use_normal_percent =
 +                              fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
 +              dcn2_0_soc.writeback_latency_us =
 +                              fixed16_to_double_to_cpu(bb->writeback_latency_us);
 +              dcn2_0_soc.ideal_dram_bw_after_urgent_percent =
 +                              fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
 +              dcn2_0_soc.max_request_size_bytes =
 +                              le32_to_cpu(bb->max_request_size_bytes);
 +              dcn2_0_soc.dram_channel_width_bytes =
 +                              le32_to_cpu(bb->dram_channel_width_bytes);
 +              dcn2_0_soc.fabric_datapath_to_dcn_data_return_bytes =
 +                              le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
 +              dcn2_0_soc.dcn_downspread_percent =
 +                              fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
 +              dcn2_0_soc.downspread_percent =
 +                              fixed16_to_double_to_cpu(bb->downspread_percent);
 +              dcn2_0_soc.dram_page_open_time_ns =
 +                              fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
 +              dcn2_0_soc.dram_rw_turnaround_time_ns =
 +                              fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
 +              dcn2_0_soc.dram_return_buffer_per_channel_bytes =
 +                              le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
 +              dcn2_0_soc.round_trip_ping_latency_dcfclk_cycles =
 +                              le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
 +              dcn2_0_soc.urgent_out_of_order_return_per_channel_bytes =
 +                              le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
 +              dcn2_0_soc.channel_interleave_bytes =
 +                              le32_to_cpu(bb->channel_interleave_bytes);
 +              dcn2_0_soc.num_banks =
 +                              le32_to_cpu(bb->num_banks);
 +              dcn2_0_soc.num_chans =
 +                              le32_to_cpu(bb->num_chans);
 +              dcn2_0_soc.vmm_page_size_bytes =
 +                              le32_to_cpu(bb->vmm_page_size_bytes);
 +              dcn2_0_soc.dram_clock_change_latency_us =
 +                              fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
 +              dcn2_0_soc.writeback_dram_clock_change_latency_us =
 +                              fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
 +              dcn2_0_soc.return_bus_width_bytes =
 +                              le32_to_cpu(bb->return_bus_width_bytes);
 +              dcn2_0_soc.dispclk_dppclk_vco_speed_mhz =
 +                              le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
 +              dcn2_0_soc.xfc_bus_transport_time_us =
 +                              le32_to_cpu(bb->xfc_bus_transport_time_us);
 +              dcn2_0_soc.xfc_xbuf_latency_tolerance_us =
 +                              le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
 +              dcn2_0_soc.use_urgent_burst_bw =
 +                              le32_to_cpu(bb->use_urgent_burst_bw);
 +              dcn2_0_soc.num_states =
 +                              le32_to_cpu(bb->num_states);
 +
 +              for (i = 0; i < dcn2_0_soc.num_states; i++) {
 +                      dcn2_0_soc.clock_limits[i].state =
 +                                      le32_to_cpu(bb->clock_limits[i].state);
 +                      dcn2_0_soc.clock_limits[i].dcfclk_mhz =
 +                                      fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
 +                      dcn2_0_soc.clock_limits[i].fabricclk_mhz =
 +                                      fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
 +                      dcn2_0_soc.clock_limits[i].dispclk_mhz =
 +                                      fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
 +                      dcn2_0_soc.clock_limits[i].dppclk_mhz =
 +                                      fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
 +                      dcn2_0_soc.clock_limits[i].phyclk_mhz =
 +                                      fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
 +                      dcn2_0_soc.clock_limits[i].socclk_mhz =
 +                                      fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
 +                      dcn2_0_soc.clock_limits[i].dscclk_mhz =
 +                                      fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
 +                      dcn2_0_soc.clock_limits[i].dram_speed_mts =
 +                                      fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
 +              }
 +      }
 +
 +      if (pool->base.pp_smu) {
 +              struct pp_smu_nv_clock_table max_clocks = {0};
 +              unsigned int uclk_states[8] = {0};
 +              unsigned int num_states = 0;
 +              enum pp_smu_status status;
 +              bool clock_limits_available = false;
 +              bool uclk_states_available = false;
 +
 +              if (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) {
 +                      status = (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states)
 +                              (&pool->base.pp_smu->nv_funcs.pp_smu, uclk_states, &num_states);
 +
 +                      uclk_states_available = (status == PP_SMU_RESULT_OK);
 +              }
 +
 +              if (pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) {
 +                      status = (*pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks)
 +                                      (&pool->base.pp_smu->nv_funcs.pp_smu, &max_clocks);
 +                      /* SMU cannot set DCF clock to anything equal to or higher than SOC clock
 +                       */
 +                      if (max_clocks.dcfClockInKhz >= max_clocks.socClockInKhz)
 +                              max_clocks.dcfClockInKhz = max_clocks.socClockInKhz - 1000;
 +                      clock_limits_available = (status == PP_SMU_RESULT_OK);
 +              }
 +
 +              if (clock_limits_available && uclk_states_available && num_states)
 +                      update_bounding_box(dc, &dcn2_0_soc, &max_clocks, uclk_states, num_states);
 +              else if (clock_limits_available)
 +                      cap_soc_clocks(&dcn2_0_soc, max_clocks);
 +      }
 +
 +      dcn2_0_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
 +      dcn2_0_ip.max_num_dpp = pool->base.pipe_count;
 +      patch_bounding_box(dc, &dcn2_0_soc);
 +
 +      return true;
 +}
 +
 +static bool construct(
 +      uint8_t num_virtual_links,
 +      struct dc *dc,
 +      struct dcn20_resource_pool *pool)
 +{
 +      int i;
 +      struct dc_context *ctx = dc->ctx;
 +      struct irq_service_init_data init_data;
 +
 +      ctx->dc_bios->regs = &bios_regs;
 +
 +      pool->base.res_cap = &res_cap_nv10;
 +      pool->base.funcs = &dcn20_res_pool_funcs;
 +
 +      /*************************************************
 +       *  Resource + asic cap hardcoding               *
 +       *************************************************/
 +      pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
 +
 +      pool->base.pipe_count = 6;
 +      pool->base.mpcc_count = 6;
 +      dc->caps.max_downscale_ratio = 200;
 +      dc->caps.i2c_speed_in_khz = 100;
 +      dc->caps.max_cursor_size = 256;
 +      dc->caps.dmdata_alloc_size = 2048;
 +
 +      dc->caps.max_slave_planes = 1;
 +      dc->caps.post_blend_color_processing = true;
 +      dc->caps.force_dp_tps4_for_cp2520 = true;
 +      dc->caps.hw_3d_lut = true;
 +
 +      if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
 +              dc->debug = debug_defaults_drv;
 +      else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
 +              pool->base.pipe_count = 4;
 +              pool->base.mpcc_count = pool->base.pipe_count;
 +              dc->debug = debug_defaults_diags;
 +      } else
 +              dc->debug = debug_defaults_diags;
 +      //dcn2.0x
 +      dc->work_arounds.dedcn20_305_wa = true;
 +
 +      // Init the vm_helper
 +      if (dc->vm_helper)
 +              vm_helper_init(dc->vm_helper, 16);
 +
 +      /*************************************************
 +       *  Create resources                             *
 +       *************************************************/
 +
 +      pool->base.clock_sources[DCN20_CLK_SRC_PLL0] =
 +                      dcn20_clock_source_create(ctx, ctx->dc_bios,
 +                              CLOCK_SOURCE_COMBO_PHY_PLL0,
 +                              &clk_src_regs[0], false);
 +      pool->base.clock_sources[DCN20_CLK_SRC_PLL1] =
 +                      dcn20_clock_source_create(ctx, ctx->dc_bios,
 +                              CLOCK_SOURCE_COMBO_PHY_PLL1,
 +                              &clk_src_regs[1], false);
 +      pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
 +                      dcn20_clock_source_create(ctx, ctx->dc_bios,
 +                              CLOCK_SOURCE_COMBO_PHY_PLL2,
 +                              &clk_src_regs[2], false);
 +      pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
 +                      dcn20_clock_source_create(ctx, ctx->dc_bios,
 +                              CLOCK_SOURCE_COMBO_PHY_PLL3,
 +                              &clk_src_regs[3], false);
 +      pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
 +                      dcn20_clock_source_create(ctx, ctx->dc_bios,
 +                              CLOCK_SOURCE_COMBO_PHY_PLL4,
 +                              &clk_src_regs[4], false);
 +      pool->base.clock_sources[DCN20_CLK_SRC_PLL5] =
 +                      dcn20_clock_source_create(ctx, ctx->dc_bios,
 +                              CLOCK_SOURCE_COMBO_PHY_PLL5,
 +                              &clk_src_regs[5], false);
 +      pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL;
 +      /* TODO: do not reuse phy_pll registers */
 +      pool->base.dp_clock_source =
 +                      dcn20_clock_source_create(ctx, ctx->dc_bios,
 +                              CLOCK_SOURCE_ID_DP_DTO,
 +                              &clk_src_regs[0], true);
 +
 +      for (i = 0; i < pool->base.clk_src_count; i++) {
 +              if (pool->base.clock_sources[i] == NULL) {
 +                      dm_error("DC: failed to create clock sources!\n");
 +                      BREAK_TO_DEBUGGER();
 +                      goto create_fail;
 +              }
 +      }
 +
 +      pool->base.dccg = dccg2_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
 +      if (pool->base.dccg == NULL) {
 +              dm_error("DC: failed to create dccg!\n");
 +              BREAK_TO_DEBUGGER();
 +              goto create_fail;
 +      }
 +
 +      pool->base.dmcu = dcn20_dmcu_create(ctx,
 +                      &dmcu_regs,
 +                      &dmcu_shift,
 +                      &dmcu_mask);
 +      if (pool->base.dmcu == NULL) {
 +              dm_error("DC: failed to create dmcu!\n");
 +              BREAK_TO_DEBUGGER();
 +              goto create_fail;
 +      }
 +
 +      pool->base.abm = dce_abm_create(ctx,
 +                      &abm_regs,
 +                      &abm_shift,
 +                      &abm_mask);
 +      if (pool->base.abm == NULL) {
 +              dm_error("DC: failed to create abm!\n");
 +              BREAK_TO_DEBUGGER();
 +              goto create_fail;
 +      }
 +
 +      pool->base.pp_smu = dcn20_pp_smu_create(ctx);
 +
 +
 +      if (!init_soc_bounding_box(dc, pool)) {
 +              dm_error("DC: failed to initialize soc bounding box!\n");
 +              BREAK_TO_DEBUGGER();
 +              goto create_fail;
 +      }
 +
 +      dml_init_instance(&dc->dml, &dcn2_0_soc, &dcn2_0_ip, DML_PROJECT_NAVI10);
 +
 +      if (!dc->debug.disable_pplib_wm_range) {
 +              struct pp_smu_wm_range_sets ranges = {0};
 +              int i = 0;
 +
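 +              /* one reader watermark set per DPM state (up to 4), bounded by uclk-derived fill clocks */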
 +              ranges.num_reader_wm_sets = 0;
 +
 +              if (dcn2_0_soc.num_states == 1) {
 +                      ranges.reader_wm_sets[0].wm_inst = i;
 +                      ranges.reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
 +                      ranges.reader_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
 +                      ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
 +                      ranges.reader_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
 +
 +                      ranges.num_reader_wm_sets = 1;
 +              } else if (dcn2_0_soc.num_states > 1) {
 +                      for (i = 0; i < 4 && i < dcn2_0_soc.num_states; i++) {
 +                              ranges.reader_wm_sets[i].wm_inst = i;
 +                              ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
 +                              ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
 +                              ranges.reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (dcn2_0_soc.clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
 +                              ranges.reader_wm_sets[i].max_fill_clk_mhz = dcn2_0_soc.clock_limits[i].dram_speed_mts / 16;
 +
 +                              ranges.num_reader_wm_sets = i + 1;
 +                      }
 +
 +                      ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
 +                      ranges.reader_wm_sets[ranges.num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
 +              }
 +
 +              ranges.num_writer_wm_sets = 1;
 +
 +              ranges.writer_wm_sets[0].wm_inst = 0;
 +              ranges.writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
 +              ranges.writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
 +              ranges.writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
 +              ranges.writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
 +
 +              /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
 +              if (pool->base.pp_smu->nv_funcs.set_wm_ranges)
 +                      pool->base.pp_smu->nv_funcs.set_wm_ranges(&pool->base.pp_smu->nv_funcs.pp_smu, &ranges);
 +      }
 +
 +      init_data.ctx = dc->ctx;
 +      pool->base.irqs = dal_irq_service_dcn20_create(&init_data);
 +      if (!pool->base.irqs)
 +              goto create_fail;
 +
 +      /* mem input -> ipp -> dpp -> opp -> TG */
 +      for (i = 0; i < pool->base.pipe_count; i++) {
 +              pool->base.hubps[i] = dcn20_hubp_create(ctx, i);
 +              if (pool->base.hubps[i] == NULL) {
 +                      BREAK_TO_DEBUGGER();
 +                      dm_error(
 +                              "DC: failed to create memory input!\n");
 +                      goto create_fail;
 +              }
 +
 +              pool->base.ipps[i] = dcn20_ipp_create(ctx, i);
 +              if (pool->base.ipps[i] == NULL) {
 +                      BREAK_TO_DEBUGGER();
 +                      dm_error(
 +                              "DC: failed to create input pixel processor!\n");
 +                      goto create_fail;
 +              }
 +
 +              pool->base.dpps[i] = dcn20_dpp_create(ctx, i);
 +              if (pool->base.dpps[i] == NULL) {
 +                      BREAK_TO_DEBUGGER();
 +                      dm_error(
 +                              "DC: failed to create dpps!\n");
 +                      goto create_fail;
 +              }
 +      }
 +      for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
 +              pool->base.engines[i] = dcn20_aux_engine_create(ctx, i);
 +              if (pool->base.engines[i] == NULL) {
 +                      BREAK_TO_DEBUGGER();
 +                      dm_error(
 +                              "DC: failed to create aux engine!\n");
 +                      goto create_fail;
 +              }
 +              pool->base.hw_i2cs[i] = dcn20_i2c_hw_create(ctx, i);
 +              if (pool->base.hw_i2cs[i] == NULL) {
 +                      BREAK_TO_DEBUGGER();
 +                      dm_error(
 +                              "DC: failed to create hw i2c!\n");
 +                      goto create_fail;
 +              }
 +              pool->base.sw_i2cs[i] = NULL;
 +      }
 +
 +      for (i = 0; i < pool->base.res_cap->num_opp; i++) {
 +              pool->base.opps[i] = dcn20_opp_create(ctx, i);
 +              if (pool->base.opps[i] == NULL) {
 +                      BREAK_TO_DEBUGGER();
 +                      dm_error(
 +                              "DC: failed to create output pixel processor!\n");
 +                      goto create_fail;
 +              }
 +      }
 +
 +      for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
 +              pool->base.timing_generators[i] = dcn20_timing_generator_create(
 +                              ctx, i);
 +              if (pool->base.timing_generators[i] == NULL) {
 +                      BREAK_TO_DEBUGGER();
 +                      dm_error("DC: failed to create tg!\n");
 +                      goto create_fail;
 +              }
 +      }
 +
 +      pool->base.timing_generator_count = i;
 +
 +      pool->base.mpc = dcn20_mpc_create(ctx);
 +      if (pool->base.mpc == NULL) {
 +              BREAK_TO_DEBUGGER();
 +              dm_error("DC: failed to create mpc!\n");
 +              goto create_fail;
 +      }
 +
 +      pool->base.hubbub = dcn20_hubbub_create(ctx);
 +      if (pool->base.hubbub == NULL) {
 +              BREAK_TO_DEBUGGER();
 +              dm_error("DC: failed to create hubbub!\n");
 +              goto create_fail;
 +      }
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
 +              pool->base.dscs[i] = dcn20_dsc_create(ctx, i);
 +              if (pool->base.dscs[i] == NULL) {
 +                      BREAK_TO_DEBUGGER();
 +                      dm_error("DC: failed to create display stream compressor %d!\n", i);
 +                      goto create_fail;
 +              }
 +      }
 +#endif
 +
 +      if (!dcn20_dwbc_create(ctx, &pool->base)) {
 +              BREAK_TO_DEBUGGER();
 +              dm_error("DC: failed to create dwbc!\n");
 +              goto create_fail;
 +      }
 +      if (!dcn20_mmhubbub_create(ctx, &pool->base)) {
 +              BREAK_TO_DEBUGGER();
 +              dm_error("DC: failed to create mcif_wb!\n");
 +              goto create_fail;
 +      }
 +
 +      if (!resource_construct(num_virtual_links, dc, &pool->base,
 +                      (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
 +                      &res_create_funcs : &res_create_maximus_funcs)))
 +                      goto create_fail;
 +
 +      dcn20_hw_sequencer_construct(dc);
 +
 +      dc->caps.max_planes = pool->base.pipe_count;
 +
 +      for (i = 0; i < dc->caps.max_planes; ++i)
 +              dc->caps.planes[i] = plane_cap;
 +
 +      dc->cap_funcs = cap_funcs;
 +
 +      return true;
 +
 +create_fail:
 +
 +      destruct(pool);
 +
 +      return false;
 +}
 +
 +struct resource_pool *dcn20_create_resource_pool(
 +              const struct dc_init_data *init_data,
 +              struct dc *dc)
 +{
 +      struct dcn20_resource_pool *pool =
 +              kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
 +
 +      if (!pool)
 +              return NULL;
 +
 +      if (construct(init_data->num_virtual_links, dc, pool))
 +              return &pool->base;
 +
 +      BREAK_TO_DEBUGGER();
 +      kfree(pool);
 +      return NULL;
 +}
index 791aa74,0000000..f5bcffc
mode 100644,000000..100644
--- /dev/null
@@@ -1,608 -1,0 +1,610 @@@
 +/*
 + * Copyright 2012-15 Advanced Micro Devices, Inc.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + *  and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 + * OTHER DEALINGS IN THE SOFTWARE.
 + *
 + * Authors: AMD
 + *
 + */
 +
++#include <linux/delay.h>
++
 +#include "dc_bios_types.h"
 +#include "dcn20_stream_encoder.h"
 +#include "reg_helper.h"
 +#include "hw_shared.h"
 +
 +#define DC_LOGGER \
 +              enc1->base.ctx->logger
 +
 +
 +#define REG(reg)\
 +      (enc1->regs->reg)
 +
 +#undef FN
 +#define FN(reg_name, field_name) \
 +      enc1->se_shift->field_name, enc1->se_mask->field_name
 +
 +
 +#define CTX \
 +      enc1->base.ctx
 +
 +
 +static void enc2_update_hdmi_info_packet(
 +      struct dcn10_stream_encoder *enc1,
 +      uint32_t packet_index,
 +      const struct dc_info_packet *info_packet)
 +{
 +      uint32_t cont, send, line;
 +
 +      if (info_packet->valid) {
 +              enc1_update_generic_info_packet(
 +                      enc1,
 +                      packet_index,
 +                      info_packet);
 +
 +              /* enable transmission of packet(s) -
 +               * packet transmission begins on the next frame */
 +              cont = 1;
 +              /* send packet(s) every frame */
 +              send = 1;
 +              /* select line number to send packets on */
 +              line = 2;
 +      } else {
 +              cont = 0;
 +              send = 0;
 +              line = 0;
 +      }
 +
 +      /* DP_SEC_GSP[x]_LINE_REFERENCE - keep default value REFER_TO_DP_SOF */
 +
 +      /* choose which generic packet control to use */
 +      switch (packet_index) {
 +      case 0:
 +              REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
 +                              HDMI_GENERIC0_CONT, cont,
 +                              HDMI_GENERIC0_SEND, send);
 +              REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL1,
 +                              HDMI_GENERIC0_LINE, line);
 +              break;
 +      case 1:
 +              REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
 +                              HDMI_GENERIC1_CONT, cont,
 +                              HDMI_GENERIC1_SEND, send);
 +              REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL1,
 +                              HDMI_GENERIC1_LINE, line);
 +              break;
 +      case 2:
 +              REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
 +                              HDMI_GENERIC2_CONT, cont,
 +                              HDMI_GENERIC2_SEND, send);
 +              REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL2,
 +                              HDMI_GENERIC2_LINE, line);
 +              break;
 +      case 3:
 +              REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
 +                              HDMI_GENERIC3_CONT, cont,
 +                              HDMI_GENERIC3_SEND, send);
 +              REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL2,
 +                              HDMI_GENERIC3_LINE, line);
 +              break;
 +      case 4:
 +              REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
 +                              HDMI_GENERIC4_CONT, cont,
 +                              HDMI_GENERIC4_SEND, send);
 +              REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL3,
 +                              HDMI_GENERIC4_LINE, line);
 +              break;
 +      case 5:
 +              REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
 +                              HDMI_GENERIC5_CONT, cont,
 +                              HDMI_GENERIC5_SEND, send);
 +              REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL3,
 +                              HDMI_GENERIC5_LINE, line);
 +              break;
 +      case 6:
 +              REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
 +                              HDMI_GENERIC6_CONT, cont,
 +                              HDMI_GENERIC6_SEND, send);
 +              REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL4,
 +                              HDMI_GENERIC6_LINE, line);
 +              break;
 +      case 7:
 +              REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
 +                              HDMI_GENERIC7_CONT, cont,
 +                              HDMI_GENERIC7_SEND, send);
 +              REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL4,
 +                              HDMI_GENERIC7_LINE, line);
 +              break;
 +      default:
 +              /* invalid HW packet index */
 +              DC_LOG_WARNING(
 +                      "Invalid HW packet index: %s()\n",
 +                      __func__);
 +              return;
 +      }
 +}
 +
 +static void enc2_stream_encoder_update_hdmi_info_packets(
 +      struct stream_encoder *enc,
 +      const struct encoder_info_frame *info_frame)
 +{
 +      struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 +
 +      /* for bring up, disable HDMI double buffering  TODO */
 +      REG_UPDATE(HDMI_DB_CONTROL, HDMI_DB_DISABLE, 1);
 +
 +      /* Always add mandatory packets first, followed by optional ones */
 +      enc2_update_hdmi_info_packet(enc1, 0, &info_frame->avi);
 +      enc2_update_hdmi_info_packet(enc1, 5, &info_frame->hfvsif);
 +      enc2_update_hdmi_info_packet(enc1, 2, &info_frame->gamut);
 +      enc2_update_hdmi_info_packet(enc1, 1, &info_frame->vendor);
 +      enc2_update_hdmi_info_packet(enc1, 3, &info_frame->spd);
 +      enc2_update_hdmi_info_packet(enc1, 4, &info_frame->hdrsmd);
 +}
 +
 +static void enc2_stream_encoder_stop_hdmi_info_packets(
 +      struct stream_encoder *enc)
 +{
 +      struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 +
 +      /* stop generic packets 0,1 on HDMI */
 +      REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
 +              HDMI_GENERIC0_CONT, 0,
 +              HDMI_GENERIC0_SEND, 0,
 +              HDMI_GENERIC1_CONT, 0,
 +              HDMI_GENERIC1_SEND, 0);
 +      REG_SET_2(HDMI_GENERIC_PACKET_CONTROL1, 0,
 +              HDMI_GENERIC0_LINE, 0,
 +              HDMI_GENERIC1_LINE, 0);
 +
 +      /* stop generic packets 2,3 on HDMI */
 +      REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
 +              HDMI_GENERIC2_CONT, 0,
 +              HDMI_GENERIC2_SEND, 0,
 +              HDMI_GENERIC3_CONT, 0,
 +              HDMI_GENERIC3_SEND, 0);
 +      REG_SET_2(HDMI_GENERIC_PACKET_CONTROL2, 0,
 +              HDMI_GENERIC2_LINE, 0,
 +              HDMI_GENERIC3_LINE, 0);
 +
 +      /* stop generic packets 4,5 on HDMI */
 +      REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
 +              HDMI_GENERIC4_CONT, 0,
 +              HDMI_GENERIC4_SEND, 0,
 +              HDMI_GENERIC5_CONT, 0,
 +              HDMI_GENERIC5_SEND, 0);
 +      REG_SET_2(HDMI_GENERIC_PACKET_CONTROL3, 0,
 +              HDMI_GENERIC4_LINE, 0,
 +              HDMI_GENERIC5_LINE, 0);
 +
 +      /* stop generic packets 6,7 on HDMI */
 +      REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
 +              HDMI_GENERIC6_CONT, 0,
 +              HDMI_GENERIC6_SEND, 0,
 +              HDMI_GENERIC7_CONT, 0,
 +              HDMI_GENERIC7_SEND, 0);
 +      REG_SET_2(HDMI_GENERIC_PACKET_CONTROL4, 0,
 +              HDMI_GENERIC6_LINE, 0,
 +              HDMI_GENERIC7_LINE, 0);
 +}
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +
 +
 +/* Update GSP7 SDP (128 bytes long) */
 +static void enc2_send_gsp7_128_info_packet(
 +      struct dcn10_stream_encoder *enc1,
 +      const struct dc_info_packet_128 *info_packet)
 +{
 +      uint32_t i;
 +
 +      /* TODOFPGA: figure out a proper number for max_retries when polling
 +       * for the lock; use 50 for now.
 +       */
 +      uint32_t max_retries = 50;
 +      const uint32_t *content = (const uint32_t *) &info_packet->sb[0];
 +
 +      ASSERT(info_packet->hb1  == DC_DP_INFOFRAME_TYPE_PPS);
 +
 +      /* Configure for PPS packet size (128 bytes) */
 +      REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP7_PPS, 1);
 +
 +      /* We need to turn on the clock before programming the AFMT block */
 +      REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
 +
 +      /* Poll until dig_update_lock is not locked -> ASIC internal signal;
 +       * assumes the OTG master lock will unlock it
 +       */
 +      /*REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS, 0, 10, max_retries);*/
 +
 +      /* Wait for HW/SW GSP memory access conflict to go away */
 +      REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT,
 +                      0, 10, max_retries);
 +
 +      /* Clear HW/SW memory access conflict flag */
 +      REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, 1);
 +
 +      /* write generic packet header */
 +      REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, 7);
 +      REG_SET_4(AFMT_GENERIC_HDR, 0,
 +                      AFMT_GENERIC_HB0, info_packet->hb0,
 +                      AFMT_GENERIC_HB1, info_packet->hb1,
 +                      AFMT_GENERIC_HB2, info_packet->hb2,
 +                      AFMT_GENERIC_HB3, info_packet->hb3);
 +
 +      /* Write generic packet content 128 bytes long. Four sets are used (indexes 7
 +       * through 10) to fit 128 bytes.
 +       */
 +      for (i = 0; i < 4; i++) {
 +              uint32_t packet_index = 7 + i;
 +              REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, packet_index);
 +
 +              REG_WRITE(AFMT_GENERIC_0, *content++);
 +              REG_WRITE(AFMT_GENERIC_1, *content++);
 +              REG_WRITE(AFMT_GENERIC_2, *content++);
 +              REG_WRITE(AFMT_GENERIC_3, *content++);
 +              REG_WRITE(AFMT_GENERIC_4, *content++);
 +              REG_WRITE(AFMT_GENERIC_5, *content++);
 +              REG_WRITE(AFMT_GENERIC_6, *content++);
 +              REG_WRITE(AFMT_GENERIC_7, *content++);
 +      }
 +
 +      REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, 1);
 +}
 +
 +/* Set DSC-related configuration.
 + *   dsc_mode: 0 disables DSC, other values enable DSC in specified format
 + *   dsc_bytes_per_pixel: Bytes per pixel in u3.28 format
 + *   dsc_slice_width: Slice width in pixels
 + */
 +static void enc2_dp_set_dsc_config(struct stream_encoder *enc,
 +                                      enum optc_dsc_mode dsc_mode,
 +                                      uint32_t dsc_bytes_per_pixel,
 +                                      uint32_t dsc_slice_width,
 +                                      uint8_t *dsc_packed_pps)
 +{
 +      struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 +      uint32_t dsc_value = 0;
 +
 +      dsc_value = REG_READ(DP_DSC_CNTL);
 +
 +      /* dsc disable skip: if DSC is currently disabled in HW, leave it untouched */
 +      if ((dsc_value & 0x3) == 0x0)
 +              return;
 +
 +
 +      REG_UPDATE_2(DP_DSC_CNTL,
 +                      DP_DSC_MODE, dsc_mode,
 +                      DP_DSC_SLICE_WIDTH, dsc_slice_width);
 +
 +      REG_SET(DP_DSC_BYTES_PER_PIXEL, 0,
 +              DP_DSC_BYTES_PER_PIXEL, dsc_bytes_per_pixel);
 +
 +      if (dsc_mode != OPTC_DSC_DISABLED) {
 +              struct dc_info_packet_128 pps_sdp;
 +
 +              ASSERT(dsc_packed_pps);
 +
 +              /* Load PPS into infoframe (SDP) registers */
 +              pps_sdp.valid = true;
 +              pps_sdp.hb0 = 0;
 +              pps_sdp.hb1 = DC_DP_INFOFRAME_TYPE_PPS;
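 +              /* hb2 = number of payload data bytes minus one (128-byte PPS -> 127) */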
 +              pps_sdp.hb2 = 127;
 +              pps_sdp.hb3 = 0;
 +              memcpy(&pps_sdp.sb[0], dsc_packed_pps, sizeof(pps_sdp.sb));
 +              enc2_send_gsp7_128_info_packet(enc1, &pps_sdp);
 +
 +              /* Enable Generic Stream Packet 7 (GSP) transmission */
 +              //REG_UPDATE(DP_SEC_CNTL,
 +              //      DP_SEC_GSP7_ENABLE, 1);
 +
 +              /* SW should make sure VBID[6] update line number is bigger
 +               * than PPS transmit line number
 +               */
 +              REG_UPDATE(DP_SEC_CNTL6,
 +                              DP_SEC_GSP7_LINE_NUM, 2);
 +              REG_UPDATE_2(DP_MSA_VBID_MISC,
 +                              DP_VBID6_LINE_REFERENCE, 0,
 +                              DP_VBID6_LINE_NUM, 3);
 +
 +              /* Send PPS data at the line number specified above.
 +               * The DP spec requires PPS to be sent only when it changes; however,
 +               * since the decoder must handle a change on any frame, we send it on
 +               * every frame to reduce the chance of it being missed. If sending
 +               * only on change turns out to be required, we can use the
 +               * DP_SEC_GSP7_SEND register.
 +               */
 +              REG_UPDATE_2(DP_SEC_CNTL,
 +                      DP_SEC_GSP7_ENABLE, 1,
 +                      DP_SEC_STREAM_ENABLE, 1);
 +      } else {
 +              /* Disable Generic Stream Packet 7 (GSP) transmission */
 +              REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, 0);
 +              REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP7_PPS, 0);
 +      }
 +}
 +#endif
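
The u3.28 format noted in the function comment stores bytes-per-pixel with 28 fractional bits. A hedged conversion helper, assuming the caller holds bits-per-pixel in 1/16-bpp units (a common DSC representation; the unit is an assumption, not taken from this patch):

#include <stdint.h>

/* Sketch only: convert bits-per-pixel in 1/16-bpp units (assumed) to
 * bytes-per-pixel in u3.28 fixed point. */
static inline uint32_t dsc_bpp_x16_to_bytes_u3_28(uint32_t bpp_x16)
{
        /* bytes/pixel = (bpp_x16 / 16) / 8; scale by 2^28 before dividing. */
        return (uint32_t)(((uint64_t)bpp_x16 << 28) / (16 * 8));
}

For example, 12 bpp (bpp_x16 = 192) yields 1.5 bytes/pixel, i.e. 0x18000000 in u3.28.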
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +/* This function reads DSC-related register fields into an enc_state struct
 + * so they can be logged later by dcn10_log_hw_state.
 + */
 +static void enc2_read_state(struct stream_encoder *enc, struct enc_state *s)
 +{
 +      struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 +
 +      /* Read the DSC mode first; if DSC is enabled, read the remaining fields */
 +      REG_GET(DP_DSC_CNTL, DP_DSC_MODE, &s->dsc_mode);
 +      if (s->dsc_mode) {
 +              REG_GET(DP_DSC_CNTL, DP_DSC_SLICE_WIDTH, &s->dsc_slice_width);
 +              REG_GET(DP_SEC_CNTL6, DP_SEC_GSP7_LINE_NUM, &s->sec_gsp_pps_line_num);
 +
 +              REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_REFERENCE, &s->vbid6_line_reference);
 +              REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_NUM, &s->vbid6_line_num);
 +
 +              REG_GET(DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, &s->sec_gsp_pps_enable);
 +              REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable);
 +      }
 +}
 +#endif
 +
 +/* Set dynamic metadata configuration.
 + *   enable_dme:         TRUE enables the Dynamic Metadata Engine (DME), FALSE disables it
 + *   hubp_requestor_id:  HUBP physical instance that is the source of dynamic metadata;
 + *                       only needs to be set when enable_dme is TRUE
 + *   dmdata_mode:        dynamic metadata packet type: DP, HDMI, or Dolby Vision
 + *
 + *   Ensure the OTG master update lock is set when changing the DME configuration.
 + */
 +static void enc2_set_dynamic_metadata(struct stream_encoder *enc,
 +              bool enable_dme,
 +              uint32_t hubp_requestor_id,
 +              enum dynamic_metadata_mode dmdata_mode)
 +{
 +      struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 +
 +      if (enable_dme) {
 +              REG_UPDATE_2(DME_CONTROL,
 +                              METADATA_HUBP_REQUESTOR_ID, hubp_requestor_id,
 +                              METADATA_STREAM_TYPE, (dmdata_mode == dmdata_dolby_vision) ? 1 : 0);
 +
 +              /* Use default line reference DP_SOF for bringup.
 +               * Should use OTG_SOF for DRR cases
 +               */
 +              if (dmdata_mode == dmdata_dp)
 +                      REG_UPDATE_3(DP_SEC_METADATA_TRANSMISSION,
 +                                      DP_SEC_METADATA_PACKET_ENABLE, 1,
 +                                      DP_SEC_METADATA_PACKET_LINE_REFERENCE, 0,
 +                                      DP_SEC_METADATA_PACKET_LINE, 20);
 +              else {
 +                      REG_UPDATE_3(HDMI_METADATA_PACKET_CONTROL,
 +                                      HDMI_METADATA_PACKET_ENABLE, 1,
 +                                      HDMI_METADATA_PACKET_LINE_REFERENCE, 0,
 +                                      HDMI_METADATA_PACKET_LINE, 2);
 +
 +                      if (dmdata_mode == dmdata_dolby_vision)
 +                              REG_UPDATE(DIG_FE_CNTL,
 +                                              DOLBY_VISION_EN, 1);
 +              }
 +
 +              REG_UPDATE(DME_CONTROL,
 +                              METADATA_ENGINE_EN, 1);
 +      } else {
 +              REG_UPDATE(DME_CONTROL,
 +                              METADATA_ENGINE_EN, 0);
 +
 +              if (dmdata_mode == dmdata_dp)
 +                      REG_UPDATE(DP_SEC_METADATA_TRANSMISSION,
 +                                      DP_SEC_METADATA_PACKET_ENABLE, 0);
 +              else {
 +                      REG_UPDATE(HDMI_METADATA_PACKET_CONTROL,
 +                                      HDMI_METADATA_PACKET_ENABLE, 0);
 +                      REG_UPDATE(DIG_FE_CNTL,
 +                                      DOLBY_VISION_EN, 0);
 +              }
 +      }
 +}
 +
 +static void enc2_stream_encoder_update_dp_info_packets(
 +      struct stream_encoder *enc,
 +      const struct encoder_info_frame *info_frame)
 +{
 +      struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 +      uint32_t dmdata_packet_enabled = 0;
 +
 +      enc1_stream_encoder_update_dp_info_packets(enc, info_frame);
 +
 +      /* check if dynamic metadata packet transmission is enabled */
 +      REG_GET(DP_SEC_METADATA_TRANSMISSION,
 +                      DP_SEC_METADATA_PACKET_ENABLE, &dmdata_packet_enabled);
 +
 +      if (dmdata_packet_enabled)
 +              REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
 +}
 +
 +static bool is_two_pixels_per_container(const struct dc_crtc_timing *timing)
 +{
 +      bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;
 +
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422
 +                      && !timing->dsc_cfg.ycbcr422_simple);
 +#endif
 +      return two_pix;
 +}
 +
 +void enc2_stream_encoder_dp_unblank(
 +              struct stream_encoder *enc,
 +              const struct encoder_unblank_param *param)
 +{
 +      struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 +
 +      if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
 +              uint32_t n_vid = 0x8000;
 +              uint32_t m_vid;
 +              uint32_t n_multiply = 0;
 +              uint64_t m_vid_l = n_vid;
 +
 +              /* YCbCr 4:2:0: the computed VID_M will be 2x the input rate */
 +              if (is_two_pixels_per_container(&param->timing) || param->odm) {
 +                      /* This logic should match get_pixel_clock_parameters() */
 +                      n_multiply = 1;
 +              }
 +              /* M / N = Fstream / Flink
 +               * m_vid / n_vid = pixel rate / link rate
 +               */
 +
 +              m_vid_l *= param->timing.pix_clk_100hz / 10;
 +              m_vid_l = div_u64(m_vid_l,
 +                      param->link_settings.link_rate
 +                              * LINK_RATE_REF_FREQ_IN_KHZ);
 +
 +              m_vid = (uint32_t) m_vid_l;
 +
 +              /* Disable M/N auto generation while programming the initial values */
 +
 +              REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0);
 +
 +              /* Auto measurement needs one full 0x8000-symbol cycle to kick in,
 +               * therefore program initial values for Mvid and Nvid.
 +               */
 +
 +              REG_UPDATE(DP_VID_N, DP_VID_N, n_vid);
 +
 +              REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);
 +
 +              REG_UPDATE_2(DP_VID_TIMING,
 +                              DP_VID_M_N_GEN_EN, 1,
 +                              DP_VID_N_MUL, n_multiply);
 +      }
 +
 +      /* set DIG_START to 0x1 to reset FIFO */
 +
 +      REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
 +
 +      /* write 0 to take the FIFO out of reset */
 +
 +      REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
 +
 +      /* switch DP encoder to CRTC data */
 +
 +      REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
 +
 +      /* wait 100us for DIG/DP logic to prime
 +       * (i.e. a few video lines)
 +       */
 +      udelay(100);
 +
 +      /* The hardware starts sending video at the start of the next DP
 +       * frame (i.e. the rising edge of vblank).
 +       * NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but that
 +       * register has no effect on the enable transition: HW always enables
 +       * VID_STREAM at the start of the next frame, and this is not
 +       * programmable.
 +       */
 +
 +      REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
 +}
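
A worked example of the M/N computation above, under the assumptions that LINK_RATE_REF_FREQ_IN_KHZ is 27000 and link_rate carries the DP rate in 0.27 GHz units (e.g. HBR2 = 20); both values are assumptions based on how DC encodes DP link rates elsewhere:

#include <stdint.h>

/* Sketch only: 594 MHz pixel clock over HBR2, mirroring the math above. */
static uint32_t example_m_vid(void)
{
        const uint64_t n_vid = 0x8000;
        const uint64_t pix_clk_khz = 594000;            /* 594 MHz */
        const uint64_t link_clk_khz = 20 * 27000ULL;    /* HBR2: 5.4 GHz */

        /* m_vid / n_vid = pixel rate / link rate */
        return (uint32_t)((n_vid * pix_clk_khz) / link_clk_khz);  /* ~36044 */
}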
 +
 +static void enc2_dp_set_odm_combine(
 +      struct stream_encoder *enc,
 +      bool odm_combine)
 +{
 +      struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 +
 +      REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_COMBINE, odm_combine);
 +}
 +
 +void enc2_stream_encoder_dp_set_stream_attribute(
 +      struct stream_encoder *enc,
 +      struct dc_crtc_timing *crtc_timing,
 +      enum dc_color_space output_color_space,
 +      uint32_t enable_sdp_splitting)
 +{
 +      struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 +
 +      enc1_stream_encoder_dp_set_stream_attribute(enc, crtc_timing, output_color_space, enable_sdp_splitting);
 +
 +      REG_UPDATE(DP_SEC_FRAMING4,
 +              DP_SST_SDP_SPLITTING, enable_sdp_splitting);
 +}
 +
 +static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
 +      .dp_set_odm_combine =
 +              enc2_dp_set_odm_combine,
 +      .dp_set_stream_attribute =
 +              enc2_stream_encoder_dp_set_stream_attribute,
 +      .hdmi_set_stream_attribute =
 +              enc1_stream_encoder_hdmi_set_stream_attribute,
 +      .dvi_set_stream_attribute =
 +              enc1_stream_encoder_dvi_set_stream_attribute,
 +      .set_mst_bandwidth =
 +              enc1_stream_encoder_set_mst_bandwidth,
 +      .update_hdmi_info_packets =
 +              enc2_stream_encoder_update_hdmi_info_packets,
 +      .stop_hdmi_info_packets =
 +              enc2_stream_encoder_stop_hdmi_info_packets,
 +      .update_dp_info_packets =
 +              enc2_stream_encoder_update_dp_info_packets,
 +      .stop_dp_info_packets =
 +              enc1_stream_encoder_stop_dp_info_packets,
 +      .dp_blank =
 +              enc1_stream_encoder_dp_blank,
 +      .dp_unblank =
 +              enc2_stream_encoder_dp_unblank,
 +      .audio_mute_control = enc1_se_audio_mute_control,
 +
 +      .dp_audio_setup = enc1_se_dp_audio_setup,
 +      .dp_audio_enable = enc1_se_dp_audio_enable,
 +      .dp_audio_disable = enc1_se_dp_audio_disable,
 +
 +      .hdmi_audio_setup = enc1_se_hdmi_audio_setup,
 +      .hdmi_audio_disable = enc1_se_hdmi_audio_disable,
 +      .setup_stereo_sync  = enc1_setup_stereo_sync,
 +      .set_avmute = enc1_stream_encoder_set_avmute,
 +      .dig_connect_to_otg  = enc1_dig_connect_to_otg,
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      .enc_read_state = enc2_read_state,
 +      .dp_set_dsc_config = enc2_dp_set_dsc_config,
 +#endif
 +      .set_dynamic_metadata = enc2_set_dynamic_metadata,
 +};
 +
 +void dcn20_stream_encoder_construct(
 +      struct dcn10_stream_encoder *enc1,
 +      struct dc_context *ctx,
 +      struct dc_bios *bp,
 +      enum engine_id eng_id,
 +      const struct dcn10_stream_enc_registers *regs,
 +      const struct dcn10_stream_encoder_shift *se_shift,
 +      const struct dcn10_stream_encoder_mask *se_mask)
 +{
 +      enc1->base.funcs = &dcn20_str_enc_funcs;
 +      enc1->base.ctx = ctx;
 +      enc1->base.id = eng_id;
 +      enc1->base.bp = bp;
 +      enc1->regs = regs;
 +      enc1->se_shift = se_shift;
 +      enc1->se_mask = se_mask;
 +}
 +
@@@ -23,6 -23,9 +23,9 @@@
   *
   */
  
+ #include <linux/delay.h>
+ #include <linux/slab.h>
  #include "dm_services.h"
  
  #include "include/gpio_types.h"
@@@ -144,15 -147,6 +147,15 @@@ static enum gpio_result set_config
                                        AUX_PAD1_MODE, 0);
                }
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +              if (ddc->regs->dc_gpio_aux_ctrl_5 != 0)
 +                      REG_UPDATE(dc_gpio_aux_ctrl_5, DDC_PAD_I2CMODE, 1);
 +
 +              /* set DC_IO_aux_rxsel = 2'b01 */
 +              if (ddc->regs->phy_aux_cntl != 0)
 +                      REG_UPDATE(phy_aux_cntl, AUX_PAD_RXSEL, 1);
 +#endif
                return GPIO_RESULT_OK;
        case GPIO_DDC_CONFIG_TYPE_MODE_AUX:
                /* set the AUX pad mode */
                        REG_SET(gpio.MASK_reg, regval,
                                        AUX_PAD1_MODE, 1);
                }
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +              if (ddc->regs->dc_gpio_aux_ctrl_5 != 0) {
 +                      REG_UPDATE(dc_gpio_aux_ctrl_5,
 +                                      DDC_PAD_I2CMODE, 0);
 +              }
 +#endif
  
                return GPIO_RESULT_OK;
        case GPIO_DDC_CONFIG_TYPE_POLL_FOR_CONNECT:
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  
  /*
@@@ -46,9 -48,6 +48,9 @@@
  #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
  #include "dcn10/hw_factory_dcn10.h"
  #endif
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +#include "dcn20/hw_factory_dcn20.h"
 +#endif
  
  #include "diagnostics/hw_factory_diag.h"
  
@@@ -92,12 -91,6 +94,12 @@@ bool dal_hw_factory_init
                return true;
  #endif
  
 +#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
 +      case DCN_VERSION_2_0:
 +              dal_hw_factory_dcn20_init(factory);
 +              return true;
 +#endif
 +
        default:
                ASSERT_CRITICAL(false);
                return false;
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  
  #include "include/logger_interface.h"
@@@ -36,7 -38,7 +38,7 @@@
  
  #include "irq_service_dcn10.h"
  
 -#include "ivsrcid/irqsrcs_dcn_1_0.h"
 +#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
  
  enum dc_irq_source to_dal_irq_source_dcn10(
                struct irq_service *irq_service,
index 65866d6,0000000..3cc0f2a
mode 100644,000000..100644
--- /dev/null
@@@ -1,373 -1,0 +1,375 @@@
 +/*
 + * Copyright 2018 Advanced Micro Devices, Inc.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 + * OTHER DEALINGS IN THE SOFTWARE.
 + *
 + * Authors: AMD
 + *
 + */
 +
++#include <linux/slab.h>
++
 +#include "dm_services.h"
 +
 +#include "include/logger_interface.h"
 +
 +#include "../dce110/irq_service_dce110.h"
 +
 +#include "dcn/dcn_2_0_0_offset.h"
 +#include "dcn/dcn_2_0_0_sh_mask.h"
 +#include "navi10_ip_offset.h"
 +
 +
 +#include "irq_service_dcn20.h"
 +
 +#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
 +
 +enum dc_irq_source to_dal_irq_source_dcn20(
 +              struct irq_service *irq_service,
 +              uint32_t src_id,
 +              uint32_t ext_id)
 +{
 +      switch (src_id) {
 +      case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
 +              return DC_IRQ_SOURCE_VBLANK1;
 +      case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
 +              return DC_IRQ_SOURCE_VBLANK2;
 +      case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
 +              return DC_IRQ_SOURCE_VBLANK3;
 +      case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
 +              return DC_IRQ_SOURCE_VBLANK4;
 +      case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
 +              return DC_IRQ_SOURCE_VBLANK5;
 +      case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
 +              return DC_IRQ_SOURCE_VBLANK6;
 +      case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
 +              return DC_IRQ_SOURCE_PFLIP1;
 +      case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
 +              return DC_IRQ_SOURCE_PFLIP2;
 +      case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
 +              return DC_IRQ_SOURCE_PFLIP3;
 +      case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
 +              return DC_IRQ_SOURCE_PFLIP4;
 +      case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
 +              return DC_IRQ_SOURCE_PFLIP5;
 +      case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
 +              return DC_IRQ_SOURCE_PFLIP6;
 +      case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
 +              return DC_IRQ_SOURCE_VUPDATE1;
 +      case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
 +              return DC_IRQ_SOURCE_VUPDATE2;
 +      case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
 +              return DC_IRQ_SOURCE_VUPDATE3;
 +      case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
 +              return DC_IRQ_SOURCE_VUPDATE4;
 +      case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
 +              return DC_IRQ_SOURCE_VUPDATE5;
 +      case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
 +              return DC_IRQ_SOURCE_VUPDATE6;
 +
 +      case DCN_1_0__SRCID__DC_HPD1_INT:
 +              /* generic src_id for all HPD and HPDRX interrupts */
 +              switch (ext_id) {
 +              case DCN_1_0__CTXID__DC_HPD1_INT:
 +                      return DC_IRQ_SOURCE_HPD1;
 +              case DCN_1_0__CTXID__DC_HPD2_INT:
 +                      return DC_IRQ_SOURCE_HPD2;
 +              case DCN_1_0__CTXID__DC_HPD3_INT:
 +                      return DC_IRQ_SOURCE_HPD3;
 +              case DCN_1_0__CTXID__DC_HPD4_INT:
 +                      return DC_IRQ_SOURCE_HPD4;
 +              case DCN_1_0__CTXID__DC_HPD5_INT:
 +                      return DC_IRQ_SOURCE_HPD5;
 +              case DCN_1_0__CTXID__DC_HPD6_INT:
 +                      return DC_IRQ_SOURCE_HPD6;
 +              case DCN_1_0__CTXID__DC_HPD1_RX_INT:
 +                      return DC_IRQ_SOURCE_HPD1RX;
 +              case DCN_1_0__CTXID__DC_HPD2_RX_INT:
 +                      return DC_IRQ_SOURCE_HPD2RX;
 +              case DCN_1_0__CTXID__DC_HPD3_RX_INT:
 +                      return DC_IRQ_SOURCE_HPD3RX;
 +              case DCN_1_0__CTXID__DC_HPD4_RX_INT:
 +                      return DC_IRQ_SOURCE_HPD4RX;
 +              case DCN_1_0__CTXID__DC_HPD5_RX_INT:
 +                      return DC_IRQ_SOURCE_HPD5RX;
 +              case DCN_1_0__CTXID__DC_HPD6_RX_INT:
 +                      return DC_IRQ_SOURCE_HPD6RX;
 +              default:
 +                      return DC_IRQ_SOURCE_INVALID;
 +              }
 +              break;
 +
 +      default:
 +              return DC_IRQ_SOURCE_INVALID;
 +      }
 +}
 +
 +static bool hpd_ack(
 +      struct irq_service *irq_service,
 +      const struct irq_source_info *info)
 +{
 +      uint32_t addr = info->status_reg;
 +      uint32_t value = dm_read_reg(irq_service->ctx, addr);
 +      uint32_t current_status =
 +              get_reg_field_value(
 +                      value,
 +                      HPD0_DC_HPD_INT_STATUS,
 +                      DC_HPD_SENSE_DELAYED);
 +
 +      dal_irq_service_ack_generic(irq_service, info);
 +
 +      value = dm_read_reg(irq_service->ctx, info->enable_reg);
 +
 +      set_reg_field_value(
 +              value,
 +              current_status ? 0 : 1,
 +              HPD0_DC_HPD_INT_CONTROL,
 +              DC_HPD_INT_POLARITY);
 +
 +      dm_write_reg(irq_service->ctx, info->enable_reg, value);
 +
 +      return true;
 +}
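
The polarity inversion in hpd_ack() is what keeps a level-type HPD signal usable as an edge detector: after acking, the next interrupt must fire on the opposite transition of the current sense level. A standalone sketch of that rule:

/* Illustration only: mirrors the "current_status ? 0 : 1" logic above. */
static unsigned int next_hpd_polarity(unsigned int sense_delayed)
{
        /* sense high (connected) -> trigger on low  (unplug): polarity 0
         * sense low  (unplugged) -> trigger on high (plug):   polarity 1
         */
        return sense_delayed ? 0 : 1;
}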
 +
 +static const struct irq_source_info_funcs hpd_irq_info_funcs = {
 +      .set = NULL,
 +      .ack = hpd_ack
 +};
 +
 +static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
 +      .set = NULL,
 +      .ack = NULL
 +};
 +
 +static const struct irq_source_info_funcs pflip_irq_info_funcs = {
 +      .set = NULL,
 +      .ack = NULL
 +};
 +
 +static const struct irq_source_info_funcs vblank_irq_info_funcs = {
 +      .set = NULL,
 +      .ack = NULL
 +};
 +
 +#undef BASE_INNER
 +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
 +
 +/* compile time expand base address. */
 +#define BASE(seg) \
 +      BASE_INNER(seg)
 +
 +
 +#define SRI(reg_name, block, id)\
 +      BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 +                      mm ## block ## id ## _ ## reg_name
 +
 +
 +#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
 +      .enable_reg = SRI(reg1, block, reg_num),\
 +      .enable_mask = \
 +              block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
 +      .enable_value = {\
 +              block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
 +              ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
 +      },\
 +      .ack_reg = SRI(reg2, block, reg_num),\
 +      .ack_mask = \
 +              block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
 +      .ack_value = \
 +              block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
 +
 +
 +
 +#define hpd_int_entry(reg_num)\
 +      [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
 +              IRQ_REG_ENTRY(HPD, reg_num,\
 +                      DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
 +                      DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
 +              .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
 +              .funcs = &hpd_irq_info_funcs\
 +      }
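
As a worked expansion of the token pasting above (register and mask names assumed to come from dcn_2_0_0_offset.h / dcn_2_0_0_sh_mask.h), hpd_int_entry(0) produces roughly:

/* Approximate preprocessor output; exact names depend on the headers. */
[DC_IRQ_SOURCE_HPD1 + 0] = {
        .enable_reg   = BASE(mmHPD0_DC_HPD_INT_CONTROL_BASE_IDX) +
                        mmHPD0_DC_HPD_INT_CONTROL,
        .enable_mask  = HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,
        .enable_value = { HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,
                          ~HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK },
        .ack_reg      = BASE(mmHPD0_DC_HPD_INT_CONTROL_BASE_IDX) +
                        mmHPD0_DC_HPD_INT_CONTROL,
        .ack_mask     = HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,
        .ack_value    = HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,
        .status_reg   = BASE(mmHPD0_DC_HPD_INT_STATUS_BASE_IDX) +
                        mmHPD0_DC_HPD_INT_STATUS,
        .funcs        = &hpd_irq_info_funcs
},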
 +
 +#define hpd_rx_int_entry(reg_num)\
 +      [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
 +              IRQ_REG_ENTRY(HPD, reg_num,\
 +                      DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
 +                      DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
 +              .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
 +              .funcs = &hpd_rx_irq_info_funcs\
 +      }
 +#define pflip_int_entry(reg_num)\
 +      [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
 +              IRQ_REG_ENTRY(HUBPREQ, reg_num,\
 +                      DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
 +                      DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
 +              .funcs = &pflip_irq_info_funcs\
 +      }
 +
 +#define vupdate_int_entry(reg_num)\
 +      [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
 +              IRQ_REG_ENTRY(OTG, reg_num,\
 +                      OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
 +                      OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
 +              .funcs = &vblank_irq_info_funcs\
 +      }
 +
 +#define vblank_int_entry(reg_num)\
 +      [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
 +              IRQ_REG_ENTRY(OTG, reg_num,\
 +                      OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
 +                      OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
 +              .funcs = &vblank_irq_info_funcs\
 +      }
 +
 +#define dummy_irq_entry() \
 +      {\
 +              .funcs = &dummy_irq_info_funcs\
 +      }
 +
 +#define i2c_int_entry(reg_num) \
 +      [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
 +
 +#define dp_sink_int_entry(reg_num) \
 +      [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
 +
 +#define gpio_pad_int_entry(reg_num) \
 +      [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
 +
 +#define dc_underflow_int_entry(reg_num) \
 +      [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
 +
 +static const struct irq_source_info_funcs dummy_irq_info_funcs = {
 +      .set = dal_irq_service_dummy_set,
 +      .ack = dal_irq_service_dummy_ack
 +};
 +
 +static const struct irq_source_info
 +irq_source_info_dcn20[DAL_IRQ_SOURCES_NUMBER] = {
 +      [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
 +      hpd_int_entry(0),
 +      hpd_int_entry(1),
 +      hpd_int_entry(2),
 +      hpd_int_entry(3),
 +      hpd_int_entry(4),
 +      hpd_int_entry(5),
 +      hpd_rx_int_entry(0),
 +      hpd_rx_int_entry(1),
 +      hpd_rx_int_entry(2),
 +      hpd_rx_int_entry(3),
 +      hpd_rx_int_entry(4),
 +      hpd_rx_int_entry(5),
 +      i2c_int_entry(1),
 +      i2c_int_entry(2),
 +      i2c_int_entry(3),
 +      i2c_int_entry(4),
 +      i2c_int_entry(5),
 +      i2c_int_entry(6),
 +      dp_sink_int_entry(1),
 +      dp_sink_int_entry(2),
 +      dp_sink_int_entry(3),
 +      dp_sink_int_entry(4),
 +      dp_sink_int_entry(5),
 +      dp_sink_int_entry(6),
 +      [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
 +      pflip_int_entry(0),
 +      pflip_int_entry(1),
 +      pflip_int_entry(2),
 +      pflip_int_entry(3),
 +      [DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
 +      [DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
 +      [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
 +      gpio_pad_int_entry(0),
 +      gpio_pad_int_entry(1),
 +      gpio_pad_int_entry(2),
 +      gpio_pad_int_entry(3),
 +      gpio_pad_int_entry(4),
 +      gpio_pad_int_entry(5),
 +      gpio_pad_int_entry(6),
 +      gpio_pad_int_entry(7),
 +      gpio_pad_int_entry(8),
 +      gpio_pad_int_entry(9),
 +      gpio_pad_int_entry(10),
 +      gpio_pad_int_entry(11),
 +      gpio_pad_int_entry(12),
 +      gpio_pad_int_entry(13),
 +      gpio_pad_int_entry(14),
 +      gpio_pad_int_entry(15),
 +      gpio_pad_int_entry(16),
 +      gpio_pad_int_entry(17),
 +      gpio_pad_int_entry(18),
 +      gpio_pad_int_entry(19),
 +      gpio_pad_int_entry(20),
 +      gpio_pad_int_entry(21),
 +      gpio_pad_int_entry(22),
 +      gpio_pad_int_entry(23),
 +      gpio_pad_int_entry(24),
 +      gpio_pad_int_entry(25),
 +      gpio_pad_int_entry(26),
 +      gpio_pad_int_entry(27),
 +      gpio_pad_int_entry(28),
 +      gpio_pad_int_entry(29),
 +      gpio_pad_int_entry(30),
 +      dc_underflow_int_entry(1),
 +      dc_underflow_int_entry(2),
 +      dc_underflow_int_entry(3),
 +      dc_underflow_int_entry(4),
 +      dc_underflow_int_entry(5),
 +      dc_underflow_int_entry(6),
 +      [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
 +      [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
 +      vupdate_int_entry(0),
 +      vupdate_int_entry(1),
 +      vupdate_int_entry(2),
 +      vupdate_int_entry(3),
 +      vupdate_int_entry(4),
 +      vupdate_int_entry(5),
 +      vblank_int_entry(0),
 +      vblank_int_entry(1),
 +      vblank_int_entry(2),
 +      vblank_int_entry(3),
 +      vblank_int_entry(4),
 +      vblank_int_entry(5),
 +};
 +
 +static const struct irq_service_funcs irq_service_funcs_dcn20 = {
 +              .to_dal_irq_source = to_dal_irq_source_dcn20
 +};
 +
 +static void construct(
 +      struct irq_service *irq_service,
 +      struct irq_service_init_data *init_data)
 +{
 +      dal_irq_service_construct(irq_service, init_data);
 +
 +      irq_service->info = irq_source_info_dcn20;
 +      irq_service->funcs = &irq_service_funcs_dcn20;
 +}
 +
 +struct irq_service *dal_irq_service_dcn20_create(
 +      struct irq_service_init_data *init_data)
 +{
 +      struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
 +                                                GFP_KERNEL);
 +
 +      if (!irq_service)
 +              return NULL;
 +
 +      construct(irq_service, init_data);
 +      return irq_service;
 +}
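
A minimal caller sketch for the constructor above, assuming irq_service_init_data carries the dc_context the same way the other DCN irq services do:

/* Hypothetical usage; the init_data field name is an assumption. */
static struct irq_service *example_make_dcn20_irq_service(struct dc_context *ctx)
{
        struct irq_service_init_data init_data = {
                .ctx = ctx,
        };

        return dal_irq_service_dcn20_create(&init_data);
}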
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  #include "virtual_stream_encoder.h"
  
@@@ -75,22 -77,7 +77,22 @@@ static void virtual_audio_mute_control
        struct stream_encoder *enc,
        bool mute) {}
  
 +#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +static void virtual_enc_dp_set_odm_combine(
 +      struct stream_encoder *enc,
 +      bool odm_combine)
 +{}
 +#endif
 +#endif
 +
  static const struct stream_encoder_funcs virtual_str_enc_funcs = {
 +#ifdef CONFIG_DRM_AMD_DC_DCN2_0
 +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 +      .dp_set_odm_combine =
 +              virtual_enc_dp_set_odm_combine,
 +#endif
 +#endif
        .dp_set_stream_attribute =
                virtual_stream_encoder_dp_set_stream_attribute,
        .hdmi_set_stream_attribute =
@@@ -23,6 -23,9 +23,9 @@@
   *
   */
  
+ #include <linux/mm.h>
+ #include <linux/slab.h>
  #include "dc.h"
  #include "opp.h"
  #include "color_gamma.h"
@@@ -1569,14 -1572,15 +1572,14 @@@ bool mod_color_calculate_regamma_params
                        output_tf->tf == TRANSFER_FUNCTION_SRGB) {
                if (ramp == NULL)
                        return true;
 -              if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) ||
 +              if ((ramp->is_logical_identity) ||
                                (!mapUserRamp && ramp->type == GAMMA_RGB_256))
                        return true;
        }
  
        output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
  
 -      if (ramp && ramp->type != GAMMA_CS_TFM_1D &&
 -                      (mapUserRamp || ramp->type != GAMMA_RGB_256)) {
 +      if (ramp && (mapUserRamp || ramp->type != GAMMA_RGB_256)) {
                rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
                            sizeof(*rgb_user),
                            GFP_KERNEL);
@@@ -20,9 -20,9 +20,9 @@@
   * OTHER DEALINGS IN THE SOFTWARE.
   */
  
- #include "pp_debug.h"
  #include <linux/firmware.h>
- #include <drm/drmP.h>
+ #include "pp_debug.h"
  #include "amdgpu.h"
  #include "amdgpu_smu.h"
  #include "soc15_common.h"
@@@ -60,167 -60,6 +60,167 @@@ int smu_get_smc_version(struct smu_cont
        return ret;
  }
  
 +int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 +                          uint32_t min, uint32_t max)
 +{
 +      int ret = 0, clk_id = 0;
 +      uint32_t param;
 +
 +      if (min == 0 && max == 0)
 +              return -EINVAL;
 +
 +      clk_id = smu_clk_get_index(smu, clk_type);
 +      if (clk_id < 0)
 +              return clk_id;
 +
 +      if (max > 0) {
 +              param = (uint32_t)((clk_id << 16) | (max & 0xffff));
 +              ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
 +                                                param);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      if (min > 0) {
 +              param = (uint32_t)((clk_id << 16) | (min & 0xffff));
 +              ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
 +                                                param);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      return ret;
 +}
 +
 +int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 +                          uint32_t min, uint32_t max)
 +{
 +      int ret = 0, clk_id = 0;
 +      uint32_t param;
 +
 +      if (min == 0 && max == 0)
 +              return -EINVAL;
 +
 +      clk_id = smu_clk_get_index(smu, clk_type);
 +      if (clk_id < 0)
 +              return clk_id;
 +
 +      if (max > 0) {
 +              param = (uint32_t)((clk_id << 16) | (max & 0xffff));
 +              ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
 +                                                param);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      if (min > 0) {
 +              param = (uint32_t)((clk_id << 16) | (min & 0xffff));
 +              ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
 +                                                param);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      return ret;
 +}
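
Both helpers above pack the clock index and a 16-bit frequency limit into a single message argument. A hedged helper making the encoding explicit (the frequency unit is whatever the firmware message expects; this patch does not pin it down):

/* Illustration only: clk index in bits 31:16, limit in bits 15:0. */
static inline uint32_t smu_freq_msg_param(int clk_id, uint32_t limit)
{
        return ((uint32_t)clk_id << 16) | (limit & 0xffff);
}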
 +
 +int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 +                         uint32_t *min, uint32_t *max)
 +{
 +      int ret = 0, clk_id = 0;
 +      uint32_t param = 0;
 +
 +      if (!min && !max)
 +              return -EINVAL;
 +
 +      switch (clk_type) {
 +      case SMU_UCLK:
 +              if (!smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
 +                      pr_warn("uclk dpm is not enabled\n");
 +                      return 0;
 +              }
 +              break;
 +      case SMU_GFXCLK:
 +              if (!smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
 +                      pr_warn("gfxclk dpm is not enabled\n");
 +                      return 0;
 +              }
 +              break;
 +      default:
 +              break;
 +      }
 +
 +      mutex_lock(&smu->mutex);
 +      clk_id = smu_clk_get_index(smu, clk_type);
 +      if (clk_id < 0) {
 +              ret = -EINVAL;
 +              goto failed;
 +      }
 +
 +      param = (clk_id & 0xffff) << 16;
 +
 +      if (max) {
 +              ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
 +              if (ret)
 +                      goto failed;
 +              ret = smu_read_smc_arg(smu, max);
 +              if (ret)
 +                      goto failed;
 +      }
 +
 +      if (min) {
 +              ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
 +              if (ret)
 +                      goto failed;
 +              ret = smu_read_smc_arg(smu, min);
 +              if (ret)
 +                      goto failed;
 +      }
 +
 +failed:
 +      mutex_unlock(&smu->mutex);
 +      return ret;
 +}
 +
 +int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
 +                            uint16_t level, uint32_t *value)
 +{
 +      int ret = 0, clk_id = 0;
 +      uint32_t param;
 +
 +      if (!value)
 +              return -EINVAL;
 +
 +      clk_id = smu_clk_get_index(smu, clk_type);
 +      if (clk_id < 0)
 +              return clk_id;
 +
 +      param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
 +
 +      ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
 +                                        param);
 +      if (ret)
 +              return ret;
 +
 +      ret = smu_read_smc_arg(smu, &param);
 +      if (ret)
 +              return ret;
 +
 +      /* BIT31: 0 - fine-grained DPM, 1 - discrete DPM.
 +       * Discrete DPM is not supported for now, so mask the bit off. */
 +      *value = param & 0x7fffffff;
 +
 +      return ret;
 +}
 +
 +int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
 +                          uint32_t *value)
 +{
 +      return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
 +}
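
Together, the two helpers above are enough to enumerate a clock's discrete DPM levels; a usage sketch:

/* Sketch: walk every DPM level of a clock via the helpers above. */
static int example_dump_dpm_levels(struct smu_context *smu,
                                   enum smu_clk_type clk_type)
{
        uint32_t count = 0, freq = 0, i;
        int ret;

        ret = smu_get_dpm_level_count(smu, clk_type, &count);
        if (ret)
                return ret;

        for (i = 0; i < count; i++) {
                ret = smu_get_dpm_freq_by_index(smu, clk_type, (uint16_t)i, &freq);
                if (ret)
                        return ret;
                pr_info("dpm level %u: %u\n", i, freq);
        }

        return 0;
}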
 +
  int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
                           bool gate)
  {
        case AMD_IP_BLOCK_TYPE_VCE:
                ret = smu_dpm_set_vce_enable(smu, gate);
                break;
 +      case AMD_IP_BLOCK_TYPE_GFX:
 +              ret = smu_gfx_off_control(smu, gate);
 +              break;
        default:
                break;
        }
@@@ -280,14 -116,6 +280,14 @@@ int smu_common_read_sensor(struct smu_c
                ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
                *size = 8;
                break;
 +      case AMDGPU_PP_SENSOR_UVD_POWER:
 +              *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
 +              *size = 4;
 +              break;
 +      case AMDGPU_PP_SENSOR_VCE_POWER:
 +              *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
 +              *size = 4;
 +              break;
        default:
                ret = -EINVAL;
                break;
        return ret;
  }
  
 -int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg,
 +int smu_update_table(struct smu_context *smu, enum smu_table_id table_index,
                     void *table_data, bool drv2smu)
  {
        struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *table = NULL;
        int ret = 0;
 -      uint32_t table_index;
 +      int table_id = smu_table_get_index(smu, table_index);
  
        if (!table_data || table_id >= smu_table->table_count)
                return -EINVAL;
  
 -      table_index = (exarg << 16) | table_id;
 -
 -      table = &smu_table->tables[table_id];
 +      table = &smu_table->tables[table_index];
  
        if (drv2smu)
                memcpy(table->cpu_addr, table_data, table->size);
        ret = smu_send_smc_msg_with_param(smu, drv2smu ?
                                          SMU_MSG_TransferTableDram2Smu :
                                          SMU_MSG_TransferTableSmu2Dram,
 -                                        table_index);
 +                                        table_id);
        if (ret)
                return ret;
  
  
  bool is_support_sw_smu(struct amdgpu_device *adev)
  {
 -      if (amdgpu_dpm != 1)
 -              return false;
 -
 -      if (adev->asic_type >= CHIP_VEGA20 && adev->asic_type != CHIP_RAVEN)
 +      if (adev->asic_type == CHIP_VEGA20)
 +              return amdgpu_dpm == 2;
 +      else if (adev->asic_type >= CHIP_NAVI10)
                return true;
 -
 -      return false;
 +      else
 +              return false;
  }
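
For quick reference, the dispatch above reduces to a small decision table:

/* Summary of is_support_sw_smu():
 *   CHIP_VEGA20:          SW SMU only when amdgpu_dpm == 2 (explicit opt-in)
 *   CHIP_NAVI10 and up:   always SW SMU
 *   everything older:     legacy powerplay path
 */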
  
  int smu_sys_get_pp_table(struct smu_context *smu, void **table)
@@@ -402,36 -233,33 +402,36 @@@ int smu_feature_init_dpm(struct smu_con
  {
        struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;
 -      uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32];
 +      uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
  
        if (!smu->pm_enabled)
                return ret;
        mutex_lock(&feature->mutex);
 -      bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
 +      bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
        mutex_unlock(&feature->mutex);
  
 -      ret = smu_get_unallowed_feature_mask(smu, unallowed_feature_mask,
 +      ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
                                             SMU_FEATURE_MAX/32);
        if (ret)
                return ret;
  
        mutex_lock(&feature->mutex);
 -      bitmap_andnot(feature->allowed, feature->allowed,
 -                    (unsigned long *)unallowed_feature_mask,
 +      bitmap_or(feature->allowed, feature->allowed,
 +                    (unsigned long *)allowed_feature_mask,
                      feature->feature_num);
        mutex_unlock(&feature->mutex);
  
        return ret;
  }
  
 -int smu_feature_is_enabled(struct smu_context *smu, int feature_id)
 +int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
  {
        struct smu_feature *feature = &smu->smu_feature;
 +      uint32_t feature_id;
        int ret = 0;
  
 +      feature_id = smu_feature_get_index(smu, mask);
 +
        WARN_ON(feature_id > feature->feature_num);
  
        mutex_lock(&feature->mutex);
        return ret;
  }
  
 -int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable)
 +int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
 +                          bool enable)
  {
        struct smu_feature *feature = &smu->smu_feature;
 +      uint32_t feature_id;
        int ret = 0;
  
 +      feature_id = smu_feature_get_index(smu, mask);
 +
        WARN_ON(feature_id > feature->feature_num);
  
        mutex_lock(&feature->mutex);
@@@ -468,14 -292,11 +468,14 @@@ failed
        return ret;
  }
  
 -int smu_feature_is_supported(struct smu_context *smu, int feature_id)
 +int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
  {
        struct smu_feature *feature = &smu->smu_feature;
 +      uint32_t feature_id;
        int ret = 0;
  
 +      feature_id = smu_feature_get_index(smu, mask);
 +
        WARN_ON(feature_id > feature->feature_num);
  
        mutex_lock(&feature->mutex);
        return ret;
  }
  
 -int smu_feature_set_supported(struct smu_context *smu, int feature_id,
 +int smu_feature_set_supported(struct smu_context *smu,
 +                            enum smu_feature_mask mask,
                              bool enable)
  {
        struct smu_feature *feature = &smu->smu_feature;
 +      uint32_t feature_id;
        int ret = 0;
  
 +      feature_id = smu_feature_get_index(smu, mask);
 +
        WARN_ON(feature_id > feature->feature_num);
  
        mutex_lock(&feature->mutex);
@@@ -513,7 -330,7 +513,7 @@@ static int smu_set_funcs(struct amdgpu_
  
        switch (adev->asic_type) {
        case CHIP_VEGA20:
 -              adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
 +      case CHIP_NAVI10:
                if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
                        smu->od_enabled = true;
                smu_v11_0_set_smu_funcs(smu);
@@@ -805,17 -622,21 +805,17 @@@ static int smu_smc_table_hw_init(struc
                return 0;
        }
  
 -      ret = smu_init_display(smu);
 +      ret = smu_init_display_count(smu, 0);
        if (ret)
                return ret;
  
        if (initialize) {
 -              ret = smu_read_pptable_from_vbios(smu);
 -              if (ret)
 -                      return ret;
 -
                /* get boot_values from vbios to set revision, gfxclk, and etc. */
                ret = smu_get_vbios_bootup_values(smu);
                if (ret)
                        return ret;
  
 -              ret = smu_get_clk_info_from_vbios(smu);
 +              ret = smu_setup_pptable(smu);
                if (ret)
                        return ret;
  
                        return ret;
        }
  
 -      ret = smu_set_od8_default_settings(smu, initialize);
 +      ret = smu_set_default_od_settings(smu, initialize);
        if (ret)
                return ret;
  
@@@ -998,14 -819,20 +998,14 @@@ static int smu_hw_init(void *handle
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
  
 -      if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 -              ret = smu_load_microcode(smu);
 -              if (ret)
 +      if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 +              ret = smu_check_fw_status(smu);
 +              if (ret) {
 +                      pr_err("SMC firmware status is not correct\n");
                        return ret;
 +              }
        }
  
 -      ret = smu_check_fw_status(smu);
 -      if (ret) {
 -              pr_err("SMC firmware status is not correct\n");
 -              return ret;
 -      }
 -
 -      mutex_lock(&smu->mutex);
 -
        ret = smu_feature_init_dpm(smu);
        if (ret)
                goto failed;
        if (ret)
                goto failed;
  
 -      mutex_unlock(&smu->mutex);
 +      ret = smu_register_irq_handler(smu);
 +      if (ret)
 +              goto failed;
  
        if (!smu->pm_enabled)
                adev->pm.dpm_enabled = false;
        else
 -              adev->pm.dpm_enabled = true;
 +              adev->pm.dpm_enabled = true;    /* TODO: set the dpm_enabled flag once VCN and DAL DPM are working */
  
        pr_info("SMU is initialized successfully!\n");
  
@@@ -1061,11 -886,20 +1061,11 @@@ static int smu_hw_fini(void *handle
        kfree(table_context->max_sustainable_clocks);
        table_context->max_sustainable_clocks = NULL;
  
 -      kfree(table_context->od_feature_capabilities);
 -      table_context->od_feature_capabilities = NULL;
 -
 -      kfree(table_context->od_settings_max);
 -      table_context->od_settings_max = NULL;
 -
 -      kfree(table_context->od_settings_min);
 -      table_context->od_settings_min = NULL;
 -
        kfree(table_context->overdrive_table);
        table_context->overdrive_table = NULL;
  
 -      kfree(table_context->od8_settings);
 -      table_context->od8_settings = NULL;
 +      kfree(smu->irq_source);
 +      smu->irq_source = NULL;
  
        ret = smu_fini_fb_allocations(smu);
        if (ret)
@@@ -1106,10 -940,6 +1106,10 @@@ static int smu_suspend(void *handle
  
        smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
  
 +      if (adev->asic_type >= CHIP_NAVI10 &&
 +          adev->gfx.rlc.funcs->stop)
 +              adev->gfx.rlc.funcs->stop(adev);
 +
        return 0;
  }
  
@@@ -1420,61 -1250,6 +1420,61 @@@ int smu_handle_task(struct smu_context 
        return ret;
  }
  
 +enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
 +{
 +      struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 +
 +      if (!smu_dpm_ctx->dpm_context)
 +              return -EINVAL;
 +
 +      mutex_lock(&(smu->mutex));
 +      if (smu_dpm_ctx->dpm_level != smu_dpm_ctx->saved_dpm_level) {
 +              smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
 +      }
 +      mutex_unlock(&(smu->mutex));
 +
 +      return smu_dpm_ctx->dpm_level;
 +}
 +
 +int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
 +{
 +      int ret = 0;
 +      int i;
 +      struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 +
 +      if (!smu_dpm_ctx->dpm_context)
 +              return -EINVAL;
 +
 +      for (i = 0; i < smu->adev->num_ip_blocks; i++) {
 +              if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)
 +                      break;
 +      }
 +
 +      smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
 +      ret = smu_handle_task(smu, level,
 +                            AMD_PP_TASK_READJUST_POWER_STATE);
 +      if (ret)
 +              return ret;
 +
 +      mutex_lock(&smu->mutex);
 +      smu_dpm_ctx->dpm_level = level;
 +      mutex_unlock(&smu->mutex);
 +
 +      return ret;
 +}
 +
 +int smu_set_display_count(struct smu_context *smu, uint32_t count)
 +{
 +      int ret = 0;
 +
 +      mutex_lock(&smu->mutex);
 +      ret = smu_init_display_count(smu, count);
 +      mutex_unlock(&smu->mutex);
 +
 +      return ret;
 +}
 +
  const struct amd_ip_funcs smu_ip_funcs = {
        .name = "smu",
        .early_init = smu_early_init,
   * OTHER DEALINGS IN THE SOFTWARE.
   */
  
- #include "pp_debug.h"
  #include <linux/firmware.h>
+ #include <linux/module.h>
++#include <linux/pci.h>
+ #include "pp_debug.h"
  #include "amdgpu.h"
  #include "amdgpu_smu.h"
  #include "atomfirmware.h"
  #include "amdgpu_atomfirmware.h"
  #include "smu_v11_0.h"
 -#include "smu11_driver_if.h"
  #include "soc15_common.h"
  #include "atom.h"
  #include "vega20_ppt.h"
 -#include "pp_thermal.h"
 +#include "navi10_ppt.h"
  
  #include "asic_reg/thm/thm_11_0_2_offset.h"
  #include "asic_reg/thm/thm_11_0_2_sh_mask.h"
 -#include "asic_reg/mp/mp_9_0_offset.h"
 -#include "asic_reg/mp/mp_9_0_sh_mask.h"
 +#include "asic_reg/mp/mp_11_0_offset.h"
 +#include "asic_reg/mp/mp_11_0_sh_mask.h"
  #include "asic_reg/nbio/nbio_7_4_offset.h"
 -#include "asic_reg/smuio/smuio_9_0_offset.h"
 -#include "asic_reg/smuio/smuio_9_0_sh_mask.h"
 +#include "asic_reg/smuio/smuio_11_0_0_offset.h"
 +#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"
  
  MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
 +MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
  
 -#define SMU11_TOOL_SIZE               0x19000
 -#define SMU11_THERMAL_MINIMUM_ALERT_TEMP      0
 -#define SMU11_THERMAL_MAXIMUM_ALERT_TEMP      255
 -
 -#define SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES 1000
  #define SMU11_VOLTAGE_SCALE 4
  
 -#define SMC_DPM_FEATURE (FEATURE_DPM_PREFETCHER_MASK | \
 -                       FEATURE_DPM_GFXCLK_MASK | \
 -                       FEATURE_DPM_UCLK_MASK | \
 -                       FEATURE_DPM_SOCCLK_MASK | \
 -                       FEATURE_DPM_UVD_MASK | \
 -                       FEATURE_DPM_VCE_MASK | \
 -                       FEATURE_DPM_MP0CLK_MASK | \
 -                       FEATURE_DPM_LINK_MASK | \
 -                       FEATURE_DPM_DCEFCLK_MASK)
 -
  static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
                                              uint16_t msg)
  {
@@@ -150,9 -167,6 +153,9 @@@ static int smu_v11_0_init_microcode(str
        case CHIP_VEGA20:
                chip_name = "vega20";
                break;
 +      case CHIP_NAVI10:
 +              chip_name = "navi10";
 +              break;
        default:
                BUG();
        }
@@@ -191,39 -205,6 +194,39 @@@ out
  
  static int smu_v11_0_load_microcode(struct smu_context *smu)
  {
 +      struct amdgpu_device *adev = smu->adev;
 +      const uint32_t *src;
 +      const struct smc_firmware_header_v1_0 *hdr;
 +      uint32_t addr_start = MP1_SRAM;
 +      uint32_t i;
 +      uint32_t mp1_fw_flags;
 +
 +      hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
 +      src = (const uint32_t *)(adev->pm.fw->data +
 +              le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 +
 +      for (i = 1; i < MP1_SMC_SIZE/4 - 1; i++) {
 +              WREG32_PCIE(addr_start, src[i]);
 +              addr_start += 4;
 +      }
 +
 +      WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
 +              1 & MP1_SMN_PUB_CTRL__RESET_MASK);
 +      WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
 +              1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);
 +
 +      for (i = 0; i < adev->usec_timeout; i++) {
 +              mp1_fw_flags = RREG32_PCIE(MP1_Public |
 +                      (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
 +              if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
 +                      MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
 +                      break;
 +              udelay(1);
 +      }
 +
 +      if (i == adev->usec_timeout)
 +              return -ETIME;
 +
        return 0;
  }
  
@@@ -257,10 -238,8 +260,10 @@@ static int smu_v11_0_check_fw_version(s
        smu_minor = (smu_version >> 8) & 0xff;
        smu_debug = (smu_version >> 0) & 0xff;
  
 -      pr_info("SMU Driver IF Version = 0x%08x, SMU FW Version = 0x%08x (%d.%d.%d)\n",
 -              if_version, smu_version, smu_major, smu_minor, smu_debug);
 +      pr_info("SMU Driver IF Version = 0x%08x, SMU FW IF Version = 0x%08x,"
 +              " SMU FW Version = 0x%08x (%d.%d.%d)\n",
 +              if_version, smu->smc_if_version,
 +              smu_version, smu_major, smu_minor, smu_debug);
  
        if (if_version != smu->smc_if_version) {
                pr_err("SMU driver if version not matched\n");
        return ret;
  }
  
 -static int smu_v11_0_read_pptable_from_vbios(struct smu_context *smu)
 +static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
 +{
 +      struct amdgpu_device *adev = smu->adev;
 +      uint32_t ppt_offset_bytes;
 +      const struct smc_firmware_header_v2_0 *v2;
 +
 +      v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;
 +
 +      ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
 +      *size = le32_to_cpu(v2->ppt_size_bytes);
 +      *table = (uint8_t *)v2 + ppt_offset_bytes;
 +
 +      return 0;
 +}
 +
 +static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table, uint32_t *size, uint32_t pptable_id)
 +{
 +      struct amdgpu_device *adev = smu->adev;
 +      const struct smc_firmware_header_v2_1 *v2_1;
 +      struct smc_soft_pptable_entry *entries;
 +      uint32_t pptable_count = 0;
 +      int i = 0;
 +
 +      v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
 +      entries = (struct smc_soft_pptable_entry *)
 +              ((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
 +      pptable_count = le32_to_cpu(v2_1->pptable_count);
 +      for (i = 0; i < pptable_count; i++) {
 +              if (le32_to_cpu(entries[i].id) == pptable_id) {
 +                      *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
 +                      *size = le32_to_cpu(entries[i].ppt_size_bytes);
 +                      break;
 +              }
 +      }
 +
 +      if (i == pptable_count)
 +              return -EINVAL;
 +
 +      return 0;
 +}
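
The v2.1 parsing above implies the following on-disk layout (offsets relative to the start of the firmware image; sketched from the code, not from a spec):

/* smc_firmware_header_v2_1
 *   +0 ...................... common header (v1.0 fields, version 2.1)
 *   +pptable_entry_offset ... smc_soft_pptable_entry[pptable_count]
 *        each entry: { id, ppt_offset_bytes, ppt_size_bytes }
 *   +entries[i].ppt_offset_bytes
 *        pptable payload for id == entries[i].id, ppt_size_bytes long
 */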
 +
 +static int smu_v11_0_setup_pptable(struct smu_context *smu)
  {
 +      struct amdgpu_device *adev = smu->adev;
 +      const struct smc_firmware_header_v1_0 *hdr;
        int ret, index;
 -      uint16_t size;
 +      uint32_t size;
        uint8_t frev, crev;
        void *table;
 +      uint16_t version_major, version_minor;
  
 -      index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
 -                                          powerplayinfo);
 +      hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
 +      version_major = le16_to_cpu(hdr->header.header_version_major);
 +      version_minor = le16_to_cpu(hdr->header.header_version_minor);
 +      if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
 +              switch (version_minor) {
 +              case 0:
 +                      ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
 +                      break;
 +              case 1:
 +                      ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
 +                                                       smu->smu_table.boot_values.pp_table_id);
 +                      break;
 +              default:
 +                      ret = -EINVAL;
 +                      break;
 +              }
 +              if (ret)
 +                      return ret;
  
 -      ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
 -                                    (uint8_t **)&table);
 -      if (ret)
 -              return ret;
 +      } else {
 +              index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
 +                                                  powerplayinfo);
 +
 +              ret = smu_get_atom_data_table(smu, index, (uint16_t *)&size, &frev, &crev,
 +                                            (uint8_t **)&table);
 +              if (ret)
 +                      return ret;
 +      }
  
        if (!smu->smu_table.power_play_table)
                smu->smu_table.power_play_table = table;
@@@ -394,19 -308,30 +397,19 @@@ static int smu_v11_0_init_smc_tables(st
        struct smu_table *tables = NULL;
        int ret = 0;
  
 -      if (smu_table->tables || smu_table->table_count != 0)
 +      if (smu_table->tables || smu_table->table_count == 0)
                return -EINVAL;
  
 -      tables = kcalloc(TABLE_COUNT, sizeof(struct smu_table), GFP_KERNEL);
 +      tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
 +                       GFP_KERNEL);
        if (!tables)
                return -ENOMEM;
  
        smu_table->tables = tables;
 -      smu_table->table_count = TABLE_COUNT;
 -
 -      SMU_TABLE_INIT(tables, TABLE_PPTABLE, sizeof(PPTable_t),
 -                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 -      SMU_TABLE_INIT(tables, TABLE_WATERMARKS, sizeof(Watermarks_t),
 -                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 -      SMU_TABLE_INIT(tables, TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
 -                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 -      SMU_TABLE_INIT(tables, TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
 -                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 -      SMU_TABLE_INIT(tables, TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, PAGE_SIZE,
 -                     AMDGPU_GEM_DOMAIN_VRAM);
 -      SMU_TABLE_INIT(tables, TABLE_ACTIVITY_MONITOR_COEFF,
 -                     sizeof(DpmActivityMonitorCoeffInt_t),
 -                     PAGE_SIZE,
 -                     AMDGPU_GEM_DOMAIN_VRAM);
 +
 +      ret = smu_tables_init(smu, tables);
 +      if (ret)
 +              return ret;
  
        ret = smu_v11_0_init_dpm_context(smu);
        if (ret)
@@@ -424,11 -349,8 +427,11 @@@ static int smu_v11_0_fini_smc_tables(st
                return -EINVAL;
  
        kfree(smu_table->tables);
 +      kfree(smu_table->metrics_table);
        smu_table->tables = NULL;
        smu_table->table_count = 0;
 +      smu_table->metrics_table = NULL;
 +      smu_table->metrics_time = 0;
  
        ret = smu_v11_0_fini_dpm_context(smu);
        if (ret)
@@@ -451,6 -373,13 +454,6 @@@ static int smu_v11_0_init_power(struct 
                return -ENOMEM;
        smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);
  
 -      smu->metrics_time = 0;
 -      smu->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
 -      if (!smu->metrics_table) {
 -              kfree(smu_power->power_context);
 -              return -ENOMEM;
 -      }
 -
        return 0;
  }
  
@@@ -463,7 -392,9 +466,7 @@@ static int smu_v11_0_fini_power(struct 
        if (!smu_power->power_context || smu_power->power_context_size == 0)
                return -EINVAL;
  
 -      kfree(smu->metrics_table);
        kfree(smu_power->power_context);
 -      smu->metrics_table = NULL;
        smu_power->power_context = NULL;
        smu_power->power_context_size = 0;
  
@@@ -666,12 -597,11 +669,12 @@@ static int smu_v11_0_parse_pptable(stru
        int ret;
  
        struct smu_table_context *table_context = &smu->smu_table;
 +      struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];
  
        if (table_context->driver_pptable)
                return -EINVAL;
  
 -      table_context->driver_pptable = kzalloc(sizeof(PPTable_t), GFP_KERNEL);
 +      table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);
  
        if (!table_context->driver_pptable)
                return -ENOMEM;
@@@ -699,29 -629,15 +702,29 @@@ static int smu_v11_0_write_pptable(stru
        struct smu_table_context *table_context = &smu->smu_table;
        int ret = 0;
  
 -      ret = smu_update_table(smu, TABLE_PPTABLE, table_context->driver_pptable, true);
 +      ret = smu_update_table(smu, SMU_TABLE_PPTABLE,
 +                             table_context->driver_pptable, true);
  
        return ret;
  }
  
  static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
  {
 -      return smu_update_table(smu, TABLE_WATERMARKS,
 -                              smu->smu_table.tables[TABLE_WATERMARKS].cpu_addr, true);
 +      struct smu_table_context *smu_table = &smu->smu_table;
 +      struct smu_table *table =
 +              &smu_table->tables[SMU_TABLE_WATERMARKS];
 +
 +      if (!table->cpu_addr)
 +              return -EINVAL;
 +
 +      return smu_update_table(smu, SMU_TABLE_WATERMARKS, table->cpu_addr,
 +                              true);
  }
  
  static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
@@@ -752,7 -668,7 +755,7 @@@ static int smu_v11_0_set_min_dcef_deep_
  static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
  {
        int ret = 0;
 -      struct smu_table *tool_table = &smu->smu_table.tables[TABLE_PMSTATUSLOG];
 +      struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
  
        if (tool_table->mc_address) {
                ret = smu_send_smc_msg_with_param(smu,
        return ret;
  }
  
 -static int smu_v11_0_init_display(struct smu_context *smu)
 +static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
  {
        int ret = 0;
  
        if (!smu->pm_enabled)
                return ret;
 -      ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
 +
 +      ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count);
        return ret;
  }
  
@@@ -873,6 -788,17 +876,6 @@@ static int smu_v11_0_get_enabled_mask(s
        return ret;
  }
  
 -static bool smu_v11_0_is_dpm_running(struct smu_context *smu)
 -{
 -      int ret = 0;
 -      uint32_t feature_mask[2];
 -      unsigned long feature_enabled;
 -      ret = smu_v11_0_get_enabled_mask(smu, feature_mask, 2);
 -      feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
 -                         ((uint64_t)feature_mask[1] << 32));
 -      return !!(feature_enabled & SMC_DPM_FEATURE);
 -}
 -
  static int smu_v11_0_system_features_control(struct smu_context *smu,
                                             bool en)
  {
@@@ -905,23 -831,22 +908,23 @@@ static int smu_v11_0_notify_display_cha
  
        if (!smu->pm_enabled)
                return ret;
 -      if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT))
 -          ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);
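 +      /* fast uclk switching is only requested for HBM vram */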
 +      if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
 +          smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
 +              ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);
  
        return ret;
  }
  
  static int
  smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
 -                                  PPCLK_e clock_select)
 +                                  enum smu_clk_type clock_select)
  {
        int ret = 0;
  
        if (!smu->pm_enabled)
                return ret;
        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
 -                                        clock_select << 16);
 +                                        smu_clk_get_index(smu, clock_select) << 16);
        if (ret) {
                pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
                return ret;
  
        /* if DC limit is zero, return AC limit */
        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
 -                                        clock_select << 16);
 +                                        smu_clk_get_index(smu, clock_select) << 16);
        if (ret) {
                pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
                return ret;
@@@ -963,10 -888,10 +966,10 @@@ static int smu_v11_0_init_max_sustainab
        max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
        max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
  
 -      if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
 +      if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                ret = smu_v11_0_get_max_sustainable_clock(smu,
                                                          &(max_sustainable_clocks->uclock),
 -                                                        PPCLK_UCLK);
 +                                                        SMU_UCLK);
                if (ret) {
                        pr_err("[%s] failed to get max UCLK from SMC!",
                               __func__);
                }
        }
  
 -      if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
 +      if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
                ret = smu_v11_0_get_max_sustainable_clock(smu,
                                                          &(max_sustainable_clocks->soc_clock),
 -                                                        PPCLK_SOCCLK);
 +                                                        SMU_SOCCLK);
                if (ret) {
                        pr_err("[%s] failed to get max SOCCLK from SMC!",
                               __func__);
                }
        }
  
 -      if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
 +      if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                ret = smu_v11_0_get_max_sustainable_clock(smu,
                                                          &(max_sustainable_clocks->dcef_clock),
 -                                                        PPCLK_DCEFCLK);
 +                                                        SMU_DCEFCLK);
                if (ret) {
                        pr_err("[%s] failed to get max DCEFCLK from SMC!",
                               __func__);
  
                ret = smu_v11_0_get_max_sustainable_clock(smu,
                                                          &(max_sustainable_clocks->display_clock),
 -                                                        PPCLK_DISPCLK);
 +                                                        SMU_DISPCLK);
                if (ret) {
                        pr_err("[%s] failed to get max DISPCLK from SMC!",
                               __func__);
                }
                ret = smu_v11_0_get_max_sustainable_clock(smu,
                                                          &(max_sustainable_clocks->phy_clock),
 -                                                        PPCLK_PHYCLK);
 +                                                        SMU_PHYCLK);
                if (ret) {
                        pr_err("[%s] failed to get max PHYCLK from SMC!",
                               __func__);
                }
                ret = smu_v11_0_get_max_sustainable_clock(smu,
                                                          &(max_sustainable_clocks->pixel_clock),
 -                                                        PPCLK_PIXCLK);
 +                                                        SMU_PIXCLK);
                if (ret) {
                        pr_err("[%s] failed to get max PIXCLK from SMC!",
                               __func__);
@@@ -1043,7 -968,7 +1046,7 @@@ static int smu_v11_0_get_power_limit(st
                mutex_unlock(&smu->mutex);
        } else {
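 +              /* the upper 16 bits of the message param select the power source */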
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
 -                                                POWER_SOURCE_AC << 16);
 +                      smu_power_get_index(smu, SMU_POWER_SOURCE_AC) << 16);
                if (ret) {
                        pr_err("[%s] get PPT limit failed!", __func__);
                        return ret;
@@@ -1070,7 -995,7 +1073,7 @@@ static int smu_v11_0_set_power_limit(st
                max_power_limit /= 100;
        }
  
 -      if (smu_feature_is_enabled(smu, FEATURE_PPT_BIT))
 +      if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
        if (ret) {
                pr_err("[%s] Set power limit Failed!", __func__);
        return ret;
  }
  
 -static int smu_v11_0_get_current_clk_freq(struct smu_context *smu, uint32_t clk_id, uint32_t *value)
 +static int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
 +                                        enum smu_clk_type clk_id,
 +                                        uint32_t *value)
  {
        int ret = 0;
        uint32_t freq;
  
 -      if (clk_id >= PPCLK_COUNT || !value)
 +      if (clk_id >= SMU_CLK_COUNT || !value)
                return -EINVAL;
  
 -      ret = smu_send_smc_msg_with_param(smu,
 -                      SMU_MSG_GetDpmClockFreq, (clk_id << 16));
 -      if (ret)
 -              return ret;
 +      /* if the ASIC has no GetDpmClockFreq message, read the current
 +       * clock from the SmuMetrics_t table instead
 +       */
 +      if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) == 0) {
 +              ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
 +      } else {
 +              ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
 +                                                (smu_clk_get_index(smu, clk_id) << 16));
 +              if (ret)
 +                      return ret;
  
 -      ret = smu_read_smc_arg(smu, &freq);
 -      if (ret)
 -              return ret;
 +              ret = smu_read_smc_arg(smu, &freq);
 +              if (ret)
 +                      return ret;
 +      }
  
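 +      /* the SMC reports clock frequencies in MHz; scale to the 10 kHz
 +       * units used elsewhere in the driver
 +       */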
        freq *= 100;
        *value = freq;
        return ret;
  }
  
 -static int smu_v11_0_get_thermal_range(struct smu_context *smu,
 -                              struct PP_TemperatureRange *range)
 -{
 -      PPTable_t *pptable = smu->smu_table.driver_pptable;
 -      memcpy(range, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
 -
 -      range->max = pptable->TedgeLimit *
 -              PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 -      range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
 -              PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 -      range->hotspot_crit_max = pptable->ThotspotLimit *
 -              PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 -      range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
 -              PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 -      range->mem_crit_max = pptable->ThbmLimit *
 -              PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 -      range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM)*
 -              PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 -
 -      return 0;
 -}
 -
  static int smu_v11_0_set_thermal_range(struct smu_context *smu,
 -                      struct PP_TemperatureRange *range)
 +                                     struct smu_temperature_range *range)
  {
        struct amdgpu_device *adev = smu->adev;
 -      int low = SMU11_THERMAL_MINIMUM_ALERT_TEMP *
 -              PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 -      int high = SMU11_THERMAL_MAXIMUM_ALERT_TEMP *
 -              PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 +      int low = SMU_THERMAL_MINIMUM_ALERT_TEMP *
 +              SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 +      int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP *
 +              SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
        uint32_t val;
  
 +      if (!range)
 +              return -EINVAL;
 +
        if (low < range->min)
                low = range->min;
        if (high > range->max)
        val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
        val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
        val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
 -      val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
 -      val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
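 +      /* unmask the high/low thermal interrupts so they can reach the
 +       * handler registered by smu_v11_0_register_irq_handler()
 +       */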
 +      val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
 +      val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
 +      val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES));
 +      val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES));
        val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
  
        WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
@@@ -1159,10 -1094,22 +1162,10 @@@ static int smu_v11_0_enable_thermal_ale
        return 0;
  }
  
 -static int smu_v11_0_set_thermal_fan_table(struct smu_context *smu)
 -{
 -      int ret;
 -      struct smu_table_context *table_context = &smu->smu_table;
 -      PPTable_t *pptable = table_context->driver_pptable;
 -
 -      ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget,
 -                      (uint32_t)pptable->FanTargetTemperature);
 -
 -      return ret;
 -}
 -
  static int smu_v11_0_start_thermal_control(struct smu_context *smu)
  {
        int ret = 0;
 -      struct PP_TemperatureRange range = {
 +      struct smu_temperature_range range = {
                TEMP_RANGE_MIN,
                TEMP_RANGE_MAX,
                TEMP_RANGE_MAX,
  
        if (!smu->pm_enabled)
                return ret;
 -      smu_v11_0_get_thermal_range(smu, &range);
 +      ret = smu_get_thermal_temperature_range(smu, &range);
  
        if (smu->smu_table.thermal_controller_type) {
                ret = smu_v11_0_set_thermal_range(smu, &range);
                ret = smu_v11_0_enable_thermal_alert(smu);
                if (ret)
                        return ret;
 -              ret = smu_v11_0_set_thermal_fan_table(smu);
 +
 +              ret = smu_set_thermal_fan_table(smu);
                if (ret)
                        return ret;
        }
        return ret;
  }
  
 -static int smu_v11_0_get_metrics_table(struct smu_context *smu,
 -              SmuMetrics_t *metrics_table)
 -{
 -      int ret = 0;
 -
 -      if (!smu->metrics_time || time_after(jiffies, smu->metrics_time + HZ / 1000)) {
 -              ret = smu_update_table(smu, TABLE_SMU_METRICS,
 -                              (void *)metrics_table, false);
 -              if (ret) {
 -                      pr_info("Failed to export SMU metrics table!\n");
 -                      return ret;
 -              }
 -              memcpy(smu->metrics_table, metrics_table, sizeof(SmuMetrics_t));
 -              smu->metrics_time = jiffies;
 -      } else
 -              memcpy(metrics_table, smu->metrics_table, sizeof(SmuMetrics_t));
 -
 -      return ret;
 -}
 -
 -static int smu_v11_0_get_current_activity_percent(struct smu_context *smu,
 -                                                enum amd_pp_sensors sensor,
 -                                                uint32_t *value)
 -{
 -      int ret = 0;
 -      SmuMetrics_t metrics;
 -
 -      if (!value)
 -              return -EINVAL;
 -
 -      ret = smu_v11_0_get_metrics_table(smu, &metrics);
 -      if (ret)
 -              return ret;
 -
 -      switch (sensor) {
 -      case AMDGPU_PP_SENSOR_GPU_LOAD:
 -              *value = metrics.AverageGfxActivity;
 -              break;
 -      case AMDGPU_PP_SENSOR_MEM_LOAD:
 -              *value = metrics.AverageUclkActivity;
 -              break;
 -      default:
 -              pr_err("Invalid sensor for retrieving clock activity\n");
 -              return -EINVAL;
 -      }
 -
 -      return 0;
 -}
 -
 -static int smu_v11_0_thermal_get_temperature(struct smu_context *smu,
 -                                           enum amd_pp_sensors sensor,
 -                                           uint32_t *value)
 -{
 -      struct amdgpu_device *adev = smu->adev;
 -      SmuMetrics_t metrics;
 -      uint32_t temp = 0;
 -      int ret = 0;
 -
 -      if (!value)
 -              return -EINVAL;
 -
 -      ret = smu_v11_0_get_metrics_table(smu, &metrics);
 -      if (ret)
 -              return ret;
 -
 -      switch (sensor) {
 -      case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
 -              temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
 -              temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
 -                              CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
 -
 -              temp = temp & 0x1ff;
 -              temp *= SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES;
 -
 -              *value = temp;
 -              break;
 -      case AMDGPU_PP_SENSOR_EDGE_TEMP:
 -              *value = metrics.TemperatureEdge *
 -                      PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 -              break;
 -      case AMDGPU_PP_SENSOR_MEM_TEMP:
 -              *value = metrics.TemperatureHBM *
 -                      PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 -              break;
 -      default:
 -              pr_err("Invalid sensor for retrieving temp\n");
 -              return -EINVAL;
 -      }
 -
 -      return 0;
 -}
 -
 -static int smu_v11_0_get_gpu_power(struct smu_context *smu, uint32_t *value)
 -{
 -      int ret = 0;
 -      SmuMetrics_t metrics;
 -
 -      if (!value)
 -              return -EINVAL;
 -
 -      ret = smu_v11_0_get_metrics_table(smu, &metrics);
 -      if (ret)
 -              return ret;
 -
 -      *value = metrics.CurrSocketPower << 8;
 -
 -      return 0;
 -}
 -
  static uint16_t convert_to_vddc(uint8_t vid)
  {
        return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
@@@ -1233,33 -1288,60 +1236,33 @@@ static int smu_v11_0_read_sensor(struc
                                 enum amd_pp_sensors sensor,
                                 void *data, uint32_t *size)
  {
 -      struct smu_table_context *table_context = &smu->smu_table;
 -      PPTable_t *pptable = table_context->driver_pptable;
        int ret = 0;
        switch (sensor) {
 -      case AMDGPU_PP_SENSOR_GPU_LOAD:
 -      case AMDGPU_PP_SENSOR_MEM_LOAD:
 -              ret = smu_v11_0_get_current_activity_percent(smu,
 -                                                           sensor,
 -                                                           (uint32_t *)data);
 -              *size = 4;
 -              break;
        case AMDGPU_PP_SENSOR_GFX_MCLK:
 -              ret = smu_get_current_clk_freq(smu, PPCLK_UCLK, (uint32_t *)data);
 +              ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_GFX_SCLK:
 -              ret = smu_get_current_clk_freq(smu, PPCLK_GFXCLK, (uint32_t *)data);
 -              *size = 4;
 -              break;
 -      case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
 -      case AMDGPU_PP_SENSOR_EDGE_TEMP:
 -      case AMDGPU_PP_SENSOR_MEM_TEMP:
 -              ret = smu_v11_0_thermal_get_temperature(smu, sensor, (uint32_t *)data);
 -              *size = 4;
 -              break;
 -      case AMDGPU_PP_SENSOR_GPU_POWER:
 -              ret = smu_v11_0_get_gpu_power(smu, (uint32_t *)data);
 +              ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_VDDGFX:
                ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
                *size = 4;
                break;
 -      case AMDGPU_PP_SENSOR_UVD_POWER:
 -              *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT) ? 1 : 0;
 -              *size = 4;
 -              break;
 -      case AMDGPU_PP_SENSOR_VCE_POWER:
 -              *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT) ? 1 : 0;
 -              *size = 4;
 -              break;
        case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
                *(uint32_t *)data = 0;
                *size = 4;
                break;
 -      case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
 -              *(uint32_t *)data = pptable->FanMaximumRpm;
 -              *size = 4;
 -              break;
        default:
                ret = smu_common_read_sensor(smu, sensor, data, size);
                break;
        }
  
 +      /* if the generic read failed, fall back to the ASIC-specific handler */
 +      if (ret)
 +              ret = smu_asic_read_sensor(smu, sensor, data, size);
 +
        if (ret)
                *size = 0;
  
@@@ -1273,29 -1355,24 +1276,29 @@@ smu_v11_0_display_clock_voltage_request
  {
        enum amd_pp_clock_type clk_type = clock_req->clock_type;
        int ret = 0;
 -      PPCLK_e clk_select = 0;
 +      enum smu_clk_type clk_select = 0;
        uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
  
        if (!smu->pm_enabled)
                return -EINVAL;
 -      if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
 +
 +      if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
 +              smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                switch (clk_type) {
                case amd_pp_dcef_clock:
 -                      clk_select = PPCLK_DCEFCLK;
 +                      clk_select = SMU_DCEFCLK;
                        break;
                case amd_pp_disp_clock:
 -                      clk_select = PPCLK_DISPCLK;
 +                      clk_select = SMU_DISPCLK;
                        break;
                case amd_pp_pixel_clock:
 -                      clk_select = PPCLK_PIXCLK;
 +                      clk_select = SMU_PIXCLK;
                        break;
                case amd_pp_phy_clock:
 -                      clk_select = PPCLK_PHYCLK;
 +                      clk_select = SMU_PHYCLK;
 +                      break;
 +              case amd_pp_mem_clock:
 +                      clk_select = SMU_UCLK;
                        break;
                default:
                        pr_info("[%s] Invalid Clock Type!", __func__);
                if (ret)
                        goto failed;
  
 +              mutex_lock(&smu->mutex);
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
 -                                                (clk_select << 16) | clk_freq);
 +                      (smu_clk_get_index(smu, clk_select) << 16) | clk_freq);
 +              mutex_unlock(&smu->mutex);
        }
  
  failed:
        return ret;
  }
  
 -static int smu_v11_0_set_watermarks_table(struct smu_context *smu,
 -                                        Watermarks_t *table, struct
 -                                        dm_pp_wm_sets_with_clock_ranges_soc15
 -                                        *clock_ranges)
 -{
 -      int i;
 -
 -      if (!table || !clock_ranges)
 -              return -EINVAL;
 -
 -      if (clock_ranges->num_wm_dmif_sets > 4 ||
 -          clock_ranges->num_wm_mcif_sets > 4)
 -                return -EINVAL;
 -
 -        for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
 -              table->WatermarkRow[1][i].MinClock =
 -                      cpu_to_le16((uint16_t)
 -                      (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
 -                      1000));
 -              table->WatermarkRow[1][i].MaxClock =
 -                      cpu_to_le16((uint16_t)
 -                      (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
 -                      1000));
 -              table->WatermarkRow[1][i].MinUclk =
 -                      cpu_to_le16((uint16_t)
 -                      (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
 -                      1000));
 -              table->WatermarkRow[1][i].MaxUclk =
 -                      cpu_to_le16((uint16_t)
 -                      (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
 -                      1000));
 -              table->WatermarkRow[1][i].WmSetting = (uint8_t)
 -                              clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
 -        }
 -
 -      for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
 -              table->WatermarkRow[0][i].MinClock =
 -                      cpu_to_le16((uint16_t)
 -                      (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
 -                      1000));
 -              table->WatermarkRow[0][i].MaxClock =
 -                      cpu_to_le16((uint16_t)
 -                      (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
 -                      1000));
 -              table->WatermarkRow[0][i].MinUclk =
 -                      cpu_to_le16((uint16_t)
 -                      (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
 -                      1000));
 -              table->WatermarkRow[0][i].MaxUclk =
 -                      cpu_to_le16((uint16_t)
 -                      (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
 -                      1000));
 -              table->WatermarkRow[0][i].WmSetting = (uint8_t)
 -                              clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
 -        }
 -
 -      return 0;
 -}
 -
  static int
  smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
                                          dm_pp_wm_sets_with_clock_ranges_soc15
                                          *clock_ranges)
  {
        int ret = 0;
 -      struct smu_table *watermarks = &smu->smu_table.tables[TABLE_WATERMARKS];
 -      Watermarks_t *table = watermarks->cpu_addr;
 +      struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
 +      void *table = watermarks->cpu_addr;
  
        if (!smu->disable_watermark &&
 -          smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT) &&
 -          smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
 -              smu_v11_0_set_watermarks_table(smu, table, clock_ranges);
 +          smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
 +          smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
 +              smu_set_watermarks_table(smu, table, clock_ranges);
                smu->watermarks_bitmap |= WATERMARKS_EXIST;
                smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
        }
        return ret;
  }
  
 -static int smu_v11_0_get_clock_ranges(struct smu_context *smu,
 -                                    uint32_t *clock,
 -                                    PPCLK_e clock_select,
 -                                    bool max)
 +static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
  {
 -      int ret;
 -      *clock = 0;
 -      if (max) {
 -              ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
 -                                          (clock_select << 16));
 -              if (ret) {
 -                      pr_err("[GetClockRanges] Failed to get max clock from SMC!\n");
 -                      return ret;
 -              }
 -              smu_read_smc_arg(smu, clock);
 -      } else {
 -              ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq,
 -                                          (clock_select << 16));
 -              if (ret) {
 -                      pr_err("[GetClockRanges] Failed to get min clock from SMC!\n");
 -                      return ret;
 -              }
 -              smu_read_smc_arg(smu, clock);
 -      }
 -
 -      return 0;
 -}
 -
 -static uint32_t smu_v11_0_dpm_get_sclk(struct smu_context *smu, bool low)
 -{
 -      uint32_t gfx_clk;
 -      int ret;
 -
 -      if (!smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) {
 -              pr_err("[GetSclks]: gfxclk dpm not enabled!\n");
 -              return -EPERM;
 -      }
 -
 -      if (low) {
 -              ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, false);
 -              if (ret) {
 -                      pr_err("[GetSclks]: fail to get min PPCLK_GFXCLK\n");
 -                      return ret;
 -              }
 -      } else {
 -              ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, true);
 -              if (ret) {
 -                      pr_err("[GetSclks]: fail to get max PPCLK_GFXCLK\n");
 -                      return ret;
 -              }
 -      }
 -
 -      return (gfx_clk * 100);
 -}
 -
 -static uint32_t smu_v11_0_dpm_get_mclk(struct smu_context *smu, bool low)
 -{
 -      uint32_t mem_clk;
 -      int ret;
 -
 -      if (!smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
 -              pr_err("[GetMclks]: memclk dpm not enabled!\n");
 -              return -EPERM;
 -      }
 -
 -      if (low) {
 -              ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_UCLK, false);
 -              if (ret) {
 -                      pr_err("[GetMclks]: fail to get min PPCLK_UCLK\n");
 -                      return ret;
 -              }
 -      } else {
 -              ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_GFXCLK, true);
 -              if (ret) {
 -                      pr_err("[GetMclks]: fail to get max PPCLK_UCLK\n");
 -                      return ret;
 -              }
 -      }
 -
 -      return (mem_clk * 100);
 -}
 -
 -static int smu_v11_0_set_od8_default_settings(struct smu_context *smu,
 -                                            bool initialize)
 -{
 -      struct smu_table_context *table_context = &smu->smu_table;
 -      int ret;
 -
 -      if (initialize) {
 -              if (table_context->overdrive_table)
 -                      return -EINVAL;
 -
 -              table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), GFP_KERNEL);
 -
 -              if (!table_context->overdrive_table)
 -                      return -ENOMEM;
 -
 -              ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, false);
 -              if (ret) {
 -                      pr_err("Failed to export over drive table!\n");
 -                      return ret;
 -              }
 -
 -              smu_set_default_od8_settings(smu);
 -      }
 -
 -      ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, true);
 -      if (ret) {
 -              pr_err("Failed to import over drive table!\n");
 -              return ret;
 -      }
 -
 -      return 0;
 -}
 +      int ret = 0;
 +      struct amdgpu_device *adev = smu->adev;
  
 -static int smu_v11_0_conv_power_profile_to_pplib_workload(int power_profile)
 -{
 -      int pplib_workload = 0;
 -
 -      switch (power_profile) {
 -      case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
 -           pplib_workload = WORKLOAD_DEFAULT_BIT;
 -           break;
 -      case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
 -           pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
 -           break;
 -      case PP_SMC_POWER_PROFILE_POWERSAVING:
 -           pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
 -           break;
 -      case PP_SMC_POWER_PROFILE_VIDEO:
 -           pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
 -           break;
 -      case PP_SMC_POWER_PROFILE_VR:
 -           pplib_workload = WORKLOAD_PPLIB_VR_BIT;
 -           break;
 -      case PP_SMC_POWER_PROFILE_COMPUTE:
 -           pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
 -           break;
 -      case PP_SMC_POWER_PROFILE_CUSTOM:
 -              pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
 +      switch (adev->asic_type) {
 +      case CHIP_VEGA20:
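 +              /* gfxoff is not driven through SMC messages on Vega20 */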
 +              break;
 +      case CHIP_NAVI10:
 +              if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
 +                      return 0;
 +              mutex_lock(&smu->mutex);
 +              if (enable)
 +                      ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
 +              else
 +                      ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
 +              mutex_unlock(&smu->mutex);
 +              break;
 +      default:
                break;
        }
  
 -      return pplib_workload;
 -}
 -
 -static int smu_v11_0_get_power_profile_mode(struct smu_context *smu, char *buf)
 -{
 -      DpmActivityMonitorCoeffInt_t activity_monitor;
 -      uint32_t i, size = 0;
 -      uint16_t workload_type = 0;
 -      static const char *profile_name[] = {
 -                                      "BOOTUP_DEFAULT",
 -                                      "3D_FULL_SCREEN",
 -                                      "POWER_SAVING",
 -                                      "VIDEO",
 -                                      "VR",
 -                                      "COMPUTE",
 -                                      "CUSTOM"};
 -      static const char *title[] = {
 -                      "PROFILE_INDEX(NAME)",
 -                      "CLOCK_TYPE(NAME)",
 -                      "FPS",
 -                      "UseRlcBusy",
 -                      "MinActiveFreqType",
 -                      "MinActiveFreq",
 -                      "BoosterFreqType",
 -                      "BoosterFreq",
 -                      "PD_Data_limit_c",
 -                      "PD_Data_error_coeff",
 -                      "PD_Data_error_rate_coeff"};
 -      int result = 0;
 -
 -      if (!smu->pm_enabled || !buf)
 -              return -EINVAL;
 -
 -      size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
 -                      title[0], title[1], title[2], title[3], title[4], title[5],
 -                      title[6], title[7], title[8], title[9], title[10]);
 -
 -      for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
 -              /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
 -              workload_type = smu_v11_0_conv_power_profile_to_pplib_workload(i);
 -              result = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
 -                                                 workload_type, &activity_monitor, false);
 -              if (result) {
 -                      pr_err("[%s] Failed to get activity monitor!", __func__);
 -                      return result;
 -              }
 -
 -              size += sprintf(buf + size, "%2d %14s%s:\n",
 -                      i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
 -
 -              size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
 -                      " ",
 -                      0,
 -                      "GFXCLK",
 -                      activity_monitor.Gfx_FPS,
 -                      activity_monitor.Gfx_UseRlcBusy,
 -                      activity_monitor.Gfx_MinActiveFreqType,
 -                      activity_monitor.Gfx_MinActiveFreq,
 -                      activity_monitor.Gfx_BoosterFreqType,
 -                      activity_monitor.Gfx_BoosterFreq,
 -                      activity_monitor.Gfx_PD_Data_limit_c,
 -                      activity_monitor.Gfx_PD_Data_error_coeff,
 -                      activity_monitor.Gfx_PD_Data_error_rate_coeff);
 -
 -              size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
 -                      " ",
 -                      1,
 -                      "SOCCLK",
 -                      activity_monitor.Soc_FPS,
 -                      activity_monitor.Soc_UseRlcBusy,
 -                      activity_monitor.Soc_MinActiveFreqType,
 -                      activity_monitor.Soc_MinActiveFreq,
 -                      activity_monitor.Soc_BoosterFreqType,
 -                      activity_monitor.Soc_BoosterFreq,
 -                      activity_monitor.Soc_PD_Data_limit_c,
 -                      activity_monitor.Soc_PD_Data_error_coeff,
 -                      activity_monitor.Soc_PD_Data_error_rate_coeff);
 -
 -              size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
 -                      " ",
 -                      2,
 -                      "UCLK",
 -                      activity_monitor.Mem_FPS,
 -                      activity_monitor.Mem_UseRlcBusy,
 -                      activity_monitor.Mem_MinActiveFreqType,
 -                      activity_monitor.Mem_MinActiveFreq,
 -                      activity_monitor.Mem_BoosterFreqType,
 -                      activity_monitor.Mem_BoosterFreq,
 -                      activity_monitor.Mem_PD_Data_limit_c,
 -                      activity_monitor.Mem_PD_Data_error_coeff,
 -                      activity_monitor.Mem_PD_Data_error_rate_coeff);
 -
 -              size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
 -                      " ",
 -                      3,
 -                      "FCLK",
 -                      activity_monitor.Fclk_FPS,
 -                      activity_monitor.Fclk_UseRlcBusy,
 -                      activity_monitor.Fclk_MinActiveFreqType,
 -                      activity_monitor.Fclk_MinActiveFreq,
 -                      activity_monitor.Fclk_BoosterFreqType,
 -                      activity_monitor.Fclk_BoosterFreq,
 -                      activity_monitor.Fclk_PD_Data_limit_c,
 -                      activity_monitor.Fclk_PD_Data_error_coeff,
 -                      activity_monitor.Fclk_PD_Data_error_rate_coeff);
 -      }
 -
 -      return size;
 -}
 -
 -static int smu_v11_0_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
 -{
 -      DpmActivityMonitorCoeffInt_t activity_monitor;
 -      int workload_type = 0, ret = 0;
 -
 -      smu->power_profile_mode = input[size];
 -
 -      if (!smu->pm_enabled)
 -              return ret;
 -      if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
 -              pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
 -              return -EINVAL;
 -      }
 -
 -      if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
 -              ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
 -                                              WORKLOAD_PPLIB_CUSTOM_BIT, &activity_monitor, false);
 -              if (ret) {
 -                      pr_err("[%s] Failed to get activity monitor!", __func__);
 -                      return ret;
 -              }
 -
 -              switch (input[0]) {
 -              case 0: /* Gfxclk */
 -                      activity_monitor.Gfx_FPS = input[1];
 -                      activity_monitor.Gfx_UseRlcBusy = input[2];
 -                      activity_monitor.Gfx_MinActiveFreqType = input[3];
 -                      activity_monitor.Gfx_MinActiveFreq = input[4];
 -                      activity_monitor.Gfx_BoosterFreqType = input[5];
 -                      activity_monitor.Gfx_BoosterFreq = input[6];
 -                      activity_monitor.Gfx_PD_Data_limit_c = input[7];
 -                      activity_monitor.Gfx_PD_Data_error_coeff = input[8];
 -                      activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
 -                      break;
 -              case 1: /* Socclk */
 -                      activity_monitor.Soc_FPS = input[1];
 -                      activity_monitor.Soc_UseRlcBusy = input[2];
 -                      activity_monitor.Soc_MinActiveFreqType = input[3];
 -                      activity_monitor.Soc_MinActiveFreq = input[4];
 -                      activity_monitor.Soc_BoosterFreqType = input[5];
 -                      activity_monitor.Soc_BoosterFreq = input[6];
 -                      activity_monitor.Soc_PD_Data_limit_c = input[7];
 -                      activity_monitor.Soc_PD_Data_error_coeff = input[8];
 -                      activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
 -                      break;
 -              case 2: /* Uclk */
 -                      activity_monitor.Mem_FPS = input[1];
 -                      activity_monitor.Mem_UseRlcBusy = input[2];
 -                      activity_monitor.Mem_MinActiveFreqType = input[3];
 -                      activity_monitor.Mem_MinActiveFreq = input[4];
 -                      activity_monitor.Mem_BoosterFreqType = input[5];
 -                      activity_monitor.Mem_BoosterFreq = input[6];
 -                      activity_monitor.Mem_PD_Data_limit_c = input[7];
 -                      activity_monitor.Mem_PD_Data_error_coeff = input[8];
 -                      activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
 -                      break;
 -              case 3: /* Fclk */
 -                      activity_monitor.Fclk_FPS = input[1];
 -                      activity_monitor.Fclk_UseRlcBusy = input[2];
 -                      activity_monitor.Fclk_MinActiveFreqType = input[3];
 -                      activity_monitor.Fclk_MinActiveFreq = input[4];
 -                      activity_monitor.Fclk_BoosterFreqType = input[5];
 -                      activity_monitor.Fclk_BoosterFreq = input[6];
 -                      activity_monitor.Fclk_PD_Data_limit_c = input[7];
 -                      activity_monitor.Fclk_PD_Data_error_coeff = input[8];
 -                      activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
 -                      break;
 -              }
 -
 -              ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
 -                                              WORKLOAD_PPLIB_COMPUTE_BIT, &activity_monitor, true);
 -              if (ret) {
 -                      pr_err("[%s] Failed to set activity monitor!", __func__);
 -                      return ret;
 -              }
 -      }
 -
 -      /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
 -      workload_type =
 -              smu_v11_0_conv_power_profile_to_pplib_workload(smu->power_profile_mode);
 -      smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
 -                                  1 << workload_type);
 -
        return ret;
  }
  
 -static int smu_v11_0_update_od8_settings(struct smu_context *smu,
 -                                      uint32_t index,
 -                                      uint32_t value)
 -{
 -      struct smu_table_context *table_context = &smu->smu_table;
 -      int ret;
 -
 -      ret = smu_update_table(smu, TABLE_OVERDRIVE,
 -                             table_context->overdrive_table, false);
 -      if (ret) {
 -              pr_err("Failed to export over drive table!\n");
 -              return ret;
 -      }
 -
 -      smu_update_specified_od8_value(smu, index, value);
 -
 -      ret = smu_update_table(smu, TABLE_OVERDRIVE,
 -                             table_context->overdrive_table, true);
 -      if (ret) {
 -              pr_err("Failed to import over drive table!\n");
 -              return ret;
 -      }
 -
 -      return 0;
 -}
 -
 -static int smu_v11_0_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
 -{
 -      if (!smu_feature_is_supported(smu, FEATURE_DPM_UVD_BIT))
 -              return 0;
 -
 -      if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT))
 -              return 0;
 -
 -      return smu_feature_set_enabled(smu, FEATURE_DPM_UVD_BIT, enable);
 -}
 -
 -static int smu_v11_0_dpm_set_vce_enable(struct smu_context *smu, bool enable)
 -{
 -      if (!smu_feature_is_supported(smu, FEATURE_DPM_VCE_BIT))
 -              return 0;
 -
 -      if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT))
 -              return 0;
 -
 -      return smu_feature_set_enabled(smu, FEATURE_DPM_VCE_BIT, enable);
 -}
 -
  static int smu_v11_0_get_current_rpm(struct smu_context *smu,
                                     uint32_t *current_rpm)
  {
  static uint32_t
  smu_v11_0_get_fan_control_mode(struct smu_context *smu)
  {
 -      if (!smu_feature_is_enabled(smu, FEATURE_FAN_CONTROL_BIT))
 +      if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
                return AMD_FAN_CTRL_MANUAL;
        else
                return AMD_FAN_CTRL_AUTO;
  }
  
  static int
 -smu_v11_0_get_fan_speed_percent(struct smu_context *smu,
 -                                         uint32_t *speed)
 -{
 -      int ret = 0;
 -      uint32_t percent = 0;
 -      uint32_t current_rpm;
 -      PPTable_t *pptable = smu->smu_table.driver_pptable;
 -
 -      ret = smu_v11_0_get_current_rpm(smu, &current_rpm);
 -      percent = current_rpm * 100 / pptable->FanMaximumRpm;
 -      *speed = percent > 100 ? 100 : percent;
 -
 -      return ret;
 -}
 -
 -static int
  smu_v11_0_smc_fan_control(struct smu_context *smu, bool start)
  {
        int ret = 0;
  
 -      if (smu_feature_is_supported(smu, FEATURE_FAN_CONTROL_BIT))
 +      if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
                return 0;
  
 -      ret = smu_feature_set_enabled(smu, FEATURE_FAN_CONTROL_BIT, start);
 +      ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, start);
        if (ret)
                pr_err("[%s]%s smc FAN CONTROL feature failed!",
                       __func__, (start ? "Start" : "Stop"));
@@@ -1508,9 -2020,6 +1511,9 @@@ set_fan_speed_rpm_failed
        return ret;
  }
  
 +#define XGMI_STATE_D0 1
 +#define XGMI_STATE_D3 0
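 +/* D0 is assumed to be the active XGMI link state and D3 the low-power one */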
 +
  static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
                                     uint32_t pstate)
  {
        return ret;
  }
  
 +#define THM_11_0__SRCID__THM_DIG_THERM_L2H            0               /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH  */
 +#define THM_11_0__SRCID__THM_DIG_THERM_H2L            1               /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL  */
 +
 +static int smu_v11_0_irq_process(struct amdgpu_device *adev,
 +                               struct amdgpu_irq_src *source,
 +                               struct amdgpu_iv_entry *entry)
 +{
 +      uint32_t client_id = entry->client_id;
 +      uint32_t src_id = entry->src_id;
 +
 +      if (client_id == SOC15_IH_CLIENTID_THM) {
 +              switch (src_id) {
 +              case THM_11_0__SRCID__THM_DIG_THERM_L2H:
 +                      pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
 +                              PCI_BUS_NUM(adev->pdev->devfn),
 +                              PCI_SLOT(adev->pdev->devfn),
 +                              PCI_FUNC(adev->pdev->devfn));
 +                      break;
 +              case THM_11_0__SRCID__THM_DIG_THERM_H2L:
 +                      pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
 +                              PCI_BUS_NUM(adev->pdev->devfn),
 +                              PCI_SLOT(adev->pdev->devfn),
 +                              PCI_FUNC(adev->pdev->devfn));
 +                      break;
 +              default:
 +                      pr_warn("GPU thermal interrupt with unknown src id (%d) detected on PCIe %d:%d.%d!\n",
 +                              src_id,
 +                              PCI_BUS_NUM(adev->pdev->devfn),
 +                              PCI_SLOT(adev->pdev->devfn),
 +                              PCI_FUNC(adev->pdev->devfn));
 +                      break;
 +              }
 +      }
 +
 +      return 0;
 +}
 +
 +static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
 +{
 +      .process = smu_v11_0_irq_process,
 +};
 +
 +static int smu_v11_0_register_irq_handler(struct smu_context *smu)
 +{
 +      struct amdgpu_device *adev = smu->adev;
 +      struct amdgpu_irq_src *irq_src = smu->irq_source;
 +      int ret = 0;
 +
 +      /* already registered */
 +      if (irq_src)
 +              return 0;
 +
 +      irq_src = kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
 +      if (!irq_src)
 +              return -ENOMEM;
 +      smu->irq_source = irq_src;
 +
 +      irq_src->funcs = &smu_v11_0_irq_funcs;
 +
 +      ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
 +                              THM_11_0__SRCID__THM_DIG_THERM_L2H,
 +                              irq_src);
 +      if (ret)
 +              return ret;
 +
 +      ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
 +                              THM_11_0__SRCID__THM_DIG_THERM_H2L,
 +                              irq_src);
 +      if (ret)
 +              return ret;
 +
 +      return ret;
 +}
 +
 +static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
 +              struct pp_smu_nv_clock_table *max_clocks)
 +{
 +      struct smu_table_context *table_context = &smu->smu_table;
 +      struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL;
 +
 +      if (!max_clocks || !table_context->max_sustainable_clocks)
 +              return -EINVAL;
 +
 +      sustainable_clocks = table_context->max_sustainable_clocks;
 +
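 +      /* sustainable clocks appear to be cached in MHz; DC expects kHz,
 +       * hence the multiplications by 1000
 +       */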
 +      max_clocks->dcfClockInKhz =
 +                      (unsigned int) sustainable_clocks->dcef_clock * 1000;
 +      max_clocks->displayClockInKhz =
 +                      (unsigned int) sustainable_clocks->display_clock * 1000;
 +      max_clocks->phyClockInKhz =
 +                      (unsigned int) sustainable_clocks->phy_clock * 1000;
 +      max_clocks->pixelClockInKhz =
 +                      (unsigned int) sustainable_clocks->pixel_clock * 1000;
 +      max_clocks->uClockInKhz =
 +                      (unsigned int) sustainable_clocks->uclock * 1000;
 +      max_clocks->socClockInKhz =
 +                      (unsigned int) sustainable_clocks->soc_clock * 1000;
 +      max_clocks->dscClockInKhz = 0;
 +      max_clocks->dppClockInKhz = 0;
 +      max_clocks->fabricClockInKhz = 0;
 +
 +      return 0;
 +}
 +
 +static int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
 +{
 +      int ret = 0;
 +
 +      mutex_lock(&smu->mutex);
 +      ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);
 +      mutex_unlock(&smu->mutex);
 +
 +      return ret;
 +}
 +
  static const struct smu_funcs smu_v11_0_funcs = {
        .init_microcode = smu_v11_0_init_microcode,
        .load_microcode = smu_v11_0_load_microcode,
        .send_smc_msg = smu_v11_0_send_msg,
        .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
        .read_smc_arg = smu_v11_0_read_arg,
 -      .read_pptable_from_vbios = smu_v11_0_read_pptable_from_vbios,
 +      .setup_pptable = smu_v11_0_setup_pptable,
        .init_smc_tables = smu_v11_0_init_smc_tables,
        .fini_smc_tables = smu_v11_0_fini_smc_tables,
        .init_power = smu_v11_0_init_power,
        .write_watermarks_table = smu_v11_0_write_watermarks_table,
        .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
        .set_tool_table_location = smu_v11_0_set_tool_table_location,
 -      .init_display = smu_v11_0_init_display,
 +      .init_display_count = smu_v11_0_init_display_count,
        .set_allowed_mask = smu_v11_0_set_allowed_mask,
        .get_enabled_mask = smu_v11_0_get_enabled_mask,
 -      .is_dpm_running = smu_v11_0_is_dpm_running,
        .system_features_control = smu_v11_0_system_features_control,
        .update_feature_enable_state = smu_v11_0_update_feature_enable_state,
        .notify_display_change = smu_v11_0_notify_display_change,
        .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
        .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
        .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
 -      .get_sclk = smu_v11_0_dpm_get_sclk,
 -      .get_mclk = smu_v11_0_dpm_get_mclk,
 -      .set_od8_default_settings = smu_v11_0_set_od8_default_settings,
 -      .conv_power_profile_to_pplib_workload = smu_v11_0_conv_power_profile_to_pplib_workload,
 -      .get_power_profile_mode = smu_v11_0_get_power_profile_mode,
 -      .set_power_profile_mode = smu_v11_0_set_power_profile_mode,
 -      .update_od8_settings = smu_v11_0_update_od8_settings,
 -      .dpm_set_uvd_enable = smu_v11_0_dpm_set_uvd_enable,
 -      .dpm_set_vce_enable = smu_v11_0_dpm_set_vce_enable,
        .get_current_rpm = smu_v11_0_get_current_rpm,
        .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
        .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
 -      .get_fan_speed_percent = smu_v11_0_get_fan_speed_percent,
        .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
        .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
        .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
 +      .gfx_off_control = smu_v11_0_gfx_off_control,
 +      .register_irq_handler = smu_v11_0_register_irq_handler,
 +      .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
 +      .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
  };
  
  void smu_v11_0_set_smu_funcs(struct smu_context *smu)
        case CHIP_VEGA20:
                vega20_set_ppt_funcs(smu);
                break;
 +      case CHIP_NAVI10:
 +              navi10_set_ppt_funcs(smu);
 +              break;
        default:
                pr_warn("Unknown asic for smu11\n");
        }
@@@ -25,6 -25,7 +25,7 @@@
  #include <linux/fb.h>
  #include "linux/delay.h"
  #include <linux/types.h>
+ #include <linux/pci.h>
  
  #include "smumgr.h"
  #include "pp_debug.h"
@@@ -2935,7 -2936,6 +2936,7 @@@ static int ci_update_smc_table(struct p
  }
  
  const struct pp_smumgr_func ci_smu_funcs = {
 +      .name = "ci_smu",
        .smu_init = ci_smu_init,
        .smu_fini = ci_smu_fini,
        .start_smu = ci_start_smu,
@@@ -25,6 -25,7 +25,7 @@@
  #include "pp_debug.h"
  #include <linux/types.h>
  #include <linux/kernel.h>
+ #include <linux/pci.h>
  #include <linux/slab.h>
  #include <linux/gfp.h>
  
@@@ -2661,7 -2662,6 +2662,7 @@@ static bool iceland_is_dpm_running(stru
  }
  
  const struct pp_smumgr_func iceland_smu_funcs = {
 +      .name = "iceland_smu",
        .smu_init = &iceland_smu_init,
        .smu_fini = &smu7_smu_fini,
        .start_smu = &iceland_start_smu,
@@@ -21,6 -21,8 +21,8 @@@
   *
   */
  
+ #include <linux/pci.h>
  #include "pp_debug.h"
  #include "smumgr.h"
  #include "smu74.h"
@@@ -2550,7 -2552,6 +2552,7 @@@ static int polaris10_update_dpm_setting
  }
  
  const struct pp_smumgr_func polaris10_smu_funcs = {
 +      .name = "polaris10_smu",
        .smu_init = polaris10_smu_init,
        .smu_fini = smu7_smu_fini,
        .start_smu = polaris10_start_smu,
@@@ -21,6 -21,8 +21,8 @@@
   *
   */
  
+ #include <linux/pci.h>
  #include "smumgr.h"
  #include "smu10_inc.h"
  #include "soc15_common.h"
@@@ -291,7 -293,6 +293,7 @@@ static int smu10_smc_table_manager(stru
  
  
  const struct pp_smumgr_func smu10_smu_funcs = {
 +      .name = "smu10_smu",
        .smu_init = &smu10_smu_init,
        .smu_fini = &smu10_smu_fini,
        .start_smu = &smu10_start_smu,
@@@ -23,6 -23,7 +23,7 @@@
  #include "pp_debug.h"
  #include <linux/types.h>
  #include <linux/kernel.h>
+ #include <linux/pci.h>
  #include <linux/slab.h>
  #include <linux/gfp.h>
  
@@@ -3240,7 -3241,6 +3241,7 @@@ static int tonga_update_dpm_settings(st
  }
  
  const struct pp_smumgr_func tonga_smu_funcs = {
 +      .name = "tonga_smu",
        .smu_init = &tonga_smu_init,
        .smu_fini = &smu7_smu_fini,
        .start_smu = &tonga_start_smu,
@@@ -21,6 -21,8 +21,8 @@@
   *
   */
  
+ #include <linux/pci.h>
  #include "smumgr.h"
  #include "vega10_inc.h"
  #include "soc15_common.h"
@@@ -346,7 -348,6 +348,7 @@@ static int vega10_smc_table_manager(str
  }
  
  const struct pp_smumgr_func vega10_smu_funcs = {
 +      .name = "vega10_smu",
        .smu_init = &vega10_smu_init,
        .smu_fini = &vega10_smu_fini,
        .start_smu = &vega10_start_smu,
   *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
   *    Dave Airlie
   */
+ #include <linux/dma-mapping.h>
+ #include <linux/pagemap.h>
+ #include <linux/seq_file.h>
+ #include <linux/slab.h>
+ #include <linux/swap.h>
+ #include <linux/swiotlb.h>
+ #include <drm/drm_agpsupport.h>
+ #include <drm/drm_debugfs.h>
+ #include <drm/drm_device.h>
+ #include <drm/drm_file.h>
+ #include <drm/drm_pci.h>
+ #include <drm/drm_prime.h>
+ #include <drm/radeon_drm.h>
  #include <drm/ttm/ttm_bo_api.h>
  #include <drm/ttm/ttm_bo_driver.h>
- #include <drm/ttm/ttm_placement.h>
  #include <drm/ttm/ttm_module.h>
  #include <drm/ttm/ttm_page_alloc.h>
- #include <drm/drmP.h>
- #include <drm/radeon_drm.h>
- #include <linux/seq_file.h>
- #include <linux/slab.h>
- #include <linux/swiotlb.h>
- #include <linux/swap.h>
- #include <linux/pagemap.h>
- #include <linux/debugfs.h>
+ #include <drm/ttm/ttm_placement.h>
  #include "radeon_reg.h"
  #include "radeon.h"
  
@@@ -1056,14 -1064,19 +1064,14 @@@ static int radeon_ttm_debugfs_init(stru
        unsigned count;
  
        struct drm_minor *minor = rdev->ddev->primary;
 -      struct dentry *ent, *root = minor->debugfs_root;
 -
 -      ent = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO, root,
 -                                rdev, &radeon_ttm_vram_fops);
 -      if (IS_ERR(ent))
 -              return PTR_ERR(ent);
 -      rdev->mman.vram = ent;
 -
 -      ent = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO, root,
 -                                rdev, &radeon_ttm_gtt_fops);
 -      if (IS_ERR(ent))
 -              return PTR_ERR(ent);
 -      rdev->mman.gtt = ent;
 +      struct dentry *root = minor->debugfs_root;
 +
 +      rdev->mman.vram = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO,
 +                                            root, rdev,
 +                                            &radeon_ttm_vram_fops);
 +
 +      rdev->mman.gtt = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO,
 +                                           root, rdev, &radeon_ttm_gtt_fops);
  
        count = ARRAY_SIZE(radeon_ttm_debugfs_list);
  
  #define DP_DSC_PEAK_THROUGHPUT              0x06B
  # define DP_DSC_THROUGHPUT_MODE_0_MASK      (0xf << 0)
  # define DP_DSC_THROUGHPUT_MODE_0_SHIFT     0
 +# define DP_DSC_THROUGHPUT_MODE_0_UPSUPPORTED 0
  # define DP_DSC_THROUGHPUT_MODE_0_340       (1 << 0)
  # define DP_DSC_THROUGHPUT_MODE_0_400       (2 << 0)
  # define DP_DSC_THROUGHPUT_MODE_0_450       (3 << 0)
  # define DP_DSC_THROUGHPUT_MODE_0_900       (12 << 0)
  # define DP_DSC_THROUGHPUT_MODE_0_950       (13 << 0)
  # define DP_DSC_THROUGHPUT_MODE_0_1000      (14 << 0)
 +# define DP_DSC_THROUGHPUT_MODE_0_170       (15 << 0)
  # define DP_DSC_THROUGHPUT_MODE_1_MASK      (0xf << 4)
  # define DP_DSC_THROUGHPUT_MODE_1_SHIFT     4
 +# define DP_DSC_THROUGHPUT_MODE_1_UPSUPPORTED 0
  # define DP_DSC_THROUGHPUT_MODE_1_340       (1 << 4)
  # define DP_DSC_THROUGHPUT_MODE_1_400       (2 << 4)
  # define DP_DSC_THROUGHPUT_MODE_1_450       (3 << 4)
  # define DP_DSC_THROUGHPUT_MODE_1_900       (12 << 4)
  # define DP_DSC_THROUGHPUT_MODE_1_950       (13 << 4)
  # define DP_DSC_THROUGHPUT_MODE_1_1000      (14 << 4)
 +# define DP_DSC_THROUGHPUT_MODE_1_170       (15 << 4)
  
  #define DP_DSC_MAX_SLICE_WIDTH              0x06C
  #define DP_DSC_MIN_SLICE_WIDTH_VALUE        2560
  # define DP_FEC_CORR_BLK_ERROR_COUNT_CAP    (1 << 2)
  # define DP_FEC_BIT_ERROR_COUNT_CAP       (1 << 3)
  
 +/* DP Extended DSC Capabilities */
 +#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0  0x0a0   /* DP 1.4a SCR */
 +#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1  0x0a1
 +#define DP_DSC_BRANCH_MAX_LINE_WIDTH        0x0a2
 +
  /* link configuration */
  #define       DP_LINK_BW_SET                      0x100
  # define DP_LINK_RATE_TABLE               0x00    /* eDP 1.4 */
@@@ -1423,6 -1414,13 +1423,13 @@@ enum drm_dp_quirk 
         * driver still need to implement proper handling for such device.
         */
        DP_DPCD_QUIRK_NO_PSR,
+       /**
+        * @DP_DPCD_QUIRK_NO_SINK_COUNT:
+        *
+        * The device does not set SINK_COUNT to a non-zero value.
+        * The driver should ignore SINK_COUNT during detection.
+        */
+       DP_DPCD_QUIRK_NO_SINK_COUNT,
  };
  
  /**