amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o amdgpu_hdp.o
+amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
+
amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
# add asic specific block
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
- nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o
+ nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o \
+ beige_goby_reg_init.o
# add DF block
amdgpu-y += \
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
+ #include "amdgpu_res_cursor.h"
+
#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif
static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
- return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
+ return (bo->tbo.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}
/**
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
- struct drm_mm_node *node = bo->tbo.mem.mm_node;
- unsigned long pages_left;
+ struct amdgpu_res_cursor cursor;
if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
return false;
- for (pages_left = bo->tbo.mem.num_pages; pages_left;
- pages_left -= node->size, node++)
- if (node->start < fpfn)
+ amdgpu_res_first(&bo->tbo.mem, 0, amdgpu_bo_size(bo), &cursor);
+ while (cursor.remaining) {
+ if (cursor.start < adev->gmc.visible_vram_size)
return true;
+ amdgpu_res_next(&cursor, cursor.size);
+ }
+
return false;
}
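/*
 * Illustration only, not part of this patch: the amdgpu_res_cursor API used
 * above walks a ttm_resource in contiguous chunks. A minimal sketch of the
 * same idiom, assuming only the cursor fields shown in this diff (start,
 * size, remaining), e.g. to count how many bytes of a resource fall below
 * a limit such as adev->gmc.visible_vram_size:
 */
static u64 example_res_bytes_below(struct ttm_resource *res, u64 size,
				   u64 limit)
{
	struct amdgpu_res_cursor cursor;
	u64 bytes = 0;

	amdgpu_res_first(res, 0, size, &cursor);
	while (cursor.remaining) {
		/* each iteration sees one physically contiguous chunk */
		if (cursor.start < limit)
			bytes += min(cursor.size, limit - cursor.start);
		amdgpu_res_next(&cursor, cursor.size);
	}
	return bytes;
}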
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
+void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
+ uint64_t *gtt_mem, uint64_t *cpu_mem);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
*addr += mm_cur->start & ~PAGE_MASK;
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
- num_bytes = num_pages * 8;
+ num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
AMDGPU_IB_POOL_DELAYED, &job);
*
* Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
*/
- static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
+ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
+ struct ttm_resource *mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct drm_mm_node *mm_node = mem->mm_node;
size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
switch (mem->mem_type) {
/* check if it's visible */
if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
return -EINVAL;
- /* Only physically contiguous buffers apply. In a contiguous
- * buffer, size of the first mm_node would match the number of
- * pages in ttm_resource.
- */
+
if (adev->mman.aper_base_kaddr &&
- (mm_node->size == mem->num_pages))
+ mem->placement & TTM_PL_FLAG_CONTIGUOUS)
mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
mem->bus.offset;
DRM_ERROR("failed to pin userptr\n");
return r;
}
+ } else if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
+ if (!ttm->sg) {
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+
+ attach = gtt->gobj->import_attach;
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ ttm->sg = sgt;
+ }
+
+ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+ ttm->num_pages);
}
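/*
 * Note: the dma-buf attachment for imported BOs is now mapped here at bind
 * time and unmapped in amdgpu_ttm_backend_unbind() below (the old
 * populate/unpopulate copies are removed further down), pairing the mapping
 * with the GART bind lifetime.
 */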
+
if (!ttm->num_pages) {
WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
} else {
/* allocate GART space */
- tmp = bo->mem;
- tmp.mm_node = NULL;
placement.num_placement = 1;
placement.placement = &placements;
placement.num_busy_placement = 1;
int r;
/* if the pages have userptr pinning then clear that first */
- if (gtt->userptr)
+ if (gtt->userptr) {
amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
+ } else if (ttm->sg && gtt->gobj->import_attach) {
+ struct dma_buf_attachment *attach;
+
+ attach = gtt->gobj->import_attach;
+ dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
+ ttm->sg = NULL;
+ }
if (!gtt->bound)
return;
return 0;
}
- if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
- if (!ttm->sg) {
- struct dma_buf_attachment *attach;
- struct sg_table *sgt;
-
- attach = gtt->gobj->import_attach;
- sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sgt))
- return PTR_ERR(sgt);
-
- ttm->sg = sgt;
- }
-
- drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
- ttm->num_pages);
+ if (ttm->page_flags & TTM_PAGE_FLAG_SG)
return 0;
- }
return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
}
if (gtt && gtt->userptr) {
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
kfree(ttm->sg);
- ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
- return;
- }
-
- if (ttm->sg && gtt->gobj->import_attach) {
- struct dma_buf_attachment *attach;
-
- attach = gtt->gobj->import_attach;
- dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
ttm->sg = NULL;
+ ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
return;
}
bool mem_train_support = false;
if (!amdgpu_sriov_vf(adev)) {
- ret = amdgpu_mem_train_support(adev);
- if (ret == 1)
+ if (amdgpu_atomfirmware_mem_training_supported(adev))
mem_train_support = true;
- else if (ret == -1)
- return -EINVAL;
else
DRM_DEBUG("memory training does not support!\n");
}
* Alex Deucher
* Jerome Glisse
*/
+
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
#include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_dma_buf.h"
+ #include "amdgpu_res_cursor.h"
#include "kfd_svm.h"
/**
while (cursor.pfn < frag_start) {
amdgpu_vm_free_pts(adev, params->vm, &cursor);
amdgpu_vm_pt_next(adev, &cursor);
+ params->table_freed = true;
}
} else if (frag >= shift) {
* @last: last mapped entry
* @flags: flags for the entries
* @offset: offset into @res and @pages_addr
- * @nodes: array of drm_mm_nodes with the MC addresses
+ * @res: ttm_resource to map
* @pages_addr: DMA addresses to use for mapping
* @fence: optional resulting fence
+ * @table_freed: optional output, set to true if a page table was freed
*
* Fill in the page table entries between @start and @last.
*
bool unlocked, struct dma_resv *resv,
uint64_t start, uint64_t last,
uint64_t flags, uint64_t offset,
- struct drm_mm_node *nodes,
+ struct ttm_resource *res,
dma_addr_t *pages_addr,
- struct dma_fence **fence)
+ struct dma_fence **fence,
+ bool *table_freed)
{
struct amdgpu_vm_update_params params;
+ struct amdgpu_res_cursor cursor;
enum amdgpu_sync_mode sync_mode;
- uint64_t pfn;
int r;
memset(&params, 0, sizeof(params));
else
sync_mode = AMDGPU_SYNC_EXPLICIT;
- pfn = offset >> PAGE_SHIFT;
- if (nodes) {
- while (pfn >= nodes->size) {
- pfn -= nodes->size;
- ++nodes;
- }
- }
-
amdgpu_vm_eviction_lock(vm);
if (vm->evicting) {
r = -EBUSY;
if (r)
goto error_unlock;
- do {
+ amdgpu_res_first(res, offset, (last - start + 1) * AMDGPU_GPU_PAGE_SIZE,
+ &cursor);
+ while (cursor.remaining) {
uint64_t tmp, num_entries, addr;
-
- num_entries = last - start + 1;
- if (nodes) {
- addr = nodes->start << PAGE_SHIFT;
- num_entries = min((nodes->size - pfn) *
- AMDGPU_GPU_PAGES_IN_CPU_PAGE, num_entries);
- } else {
- addr = 0;
- }
-
+ num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
if (pages_addr) {
bool contiguous = true;
if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
+ uint64_t pfn = cursor.start >> PAGE_SHIFT;
uint64_t count;
contiguous = pages_addr[pfn + 1] ==
}
if (!contiguous) {
- addr = pfn << PAGE_SHIFT;
+ addr = cursor.start;
params.pages_addr = pages_addr;
} else {
- addr = pages_addr[pfn];
+ addr = pages_addr[cursor.start >> PAGE_SHIFT];
params.pages_addr = NULL;
}
} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
- addr += bo_adev->vm_manager.vram_base_offset;
- addr += pfn << PAGE_SHIFT;
+ addr = bo_adev->vm_manager.vram_base_offset +
+ cursor.start;
+ } else {
+ addr = 0;
}
tmp = start + num_entries;
if (r)
goto error_unlock;
- pfn += num_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
- if (nodes && nodes->size == pfn) {
- pfn = 0;
- ++nodes;
- }
+ amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
start = tmp;
-
- } while (unlikely(start != last + 1));
+ }
r = vm->update_funcs->commit(&params, fence);
+ if (table_freed)
+ *table_freed = params.table_freed;
+
error_unlock:
amdgpu_vm_eviction_unlock(vm);
return r;
}
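/*
 * Note on @table_freed: it is optional; callers that don't need it pass
 * NULL, as the amdgpu_vm_bo_update() and PTE-clear call sites below do. A
 * caller can use it, for example, to decide whether a TLB flush is needed
 * after page tables were freed.
 */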
+void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
+ uint64_t *gtt_mem, uint64_t *cpu_mem)
+{
+ struct amdgpu_bo_va *bo_va, *tmp;
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
+ if (!bo_va->base.bo)
+ continue;
+ amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
+ gtt_mem, cpu_mem);
+ }
+ list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
+ if (!bo_va->base.bo)
+ continue;
+ amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
+ gtt_mem, cpu_mem);
+ }
+ list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
+ if (!bo_va->base.bo)
+ continue;
+ amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
+ gtt_mem, cpu_mem);
+ }
+ list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
+ if (!bo_va->base.bo)
+ continue;
+ amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
+ gtt_mem, cpu_mem);
+ }
+ spin_lock(&vm->invalidated_lock);
+ list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
+ if (!bo_va->base.bo)
+ continue;
+ amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
+ gtt_mem, cpu_mem);
+ }
+ list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
+ if (!bo_va->base.bo)
+ continue;
+ amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
+ gtt_mem, cpu_mem);
+ }
+ spin_unlock(&vm->invalidated_lock);
+}
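/*
 * Consumer sketch, illustration only: the new amdgpu_fdinfo.o added in the
 * Makefile hunk above can aggregate per-VM totals with this helper. The
 * seq_file target and the exact labels here are assumptions, not part of
 * this patch.
 */
static void example_show_vm_usage(struct amdgpu_vm *vm, struct seq_file *m)
{
	uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;

	amdgpu_vm_get_memory(vm, &vram_mem, &gtt_mem, &cpu_mem);
	seq_printf(m, "vram:\t%llu KiB\ngtt:\t%llu KiB\ncpu:\t%llu KiB\n",
		   vram_mem >> 10, gtt_mem >> 10, cpu_mem >> 10);
}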
/**
* amdgpu_vm_bo_update - update all BO mappings in the vm page table
*
struct amdgpu_bo_va_mapping *mapping;
dma_addr_t *pages_addr = NULL;
struct ttm_resource *mem;
- struct drm_mm_node *nodes;
struct dma_fence **last_update;
struct dma_resv *resv;
uint64_t flags;
if (clear || !bo) {
mem = NULL;
- nodes = NULL;
resv = vm->root.base.bo->tbo.base.resv;
} else {
struct drm_gem_object *obj = &bo->tbo.base;
bo = gem_to_amdgpu_bo(gobj);
}
mem = &bo->tbo.mem;
- nodes = mem->mm_node;
if (mem->mem_type == TTM_PL_TT)
pages_addr = bo->tbo.ttm->dma_address;
}
r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
resv, mapping->start,
mapping->last, update_flags,
- mapping->offset, nodes,
- pages_addr, last_update);
+ mapping->offset, mem,
+ pages_addr, last_update, NULL);
if (r)
return r;
}
r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false,
resv, mapping->start,
mapping->last, init_pte_value,
- 0, NULL, NULL, &f);
+ 0, NULL, NULL, &f, NULL);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
if (r) {
dma_fence_put(f);
}
r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
- addr, flags, value, NULL, NULL,
+ addr, flags, value, NULL, NULL, NULL,
NULL);
if (r)
goto error_unlock;
* @num_dw_left: number of dw left for the IB
*/
unsigned int num_dw_left;
+
+ /**
+ * @table_freed: set to true when a page table was freed during the update
+ */
+ bool table_freed;
};
struct amdgpu_vm_update_funcs {
bool unlocked, struct dma_resv *resv,
uint64_t start, uint64_t last,
uint64_t flags, uint64_t offset,
- struct drm_mm_node *nodes,
+ struct ttm_resource *res,
dma_addr_t *pages_addr,
- struct dma_fence **fence);
+ struct dma_fence **fence, bool *table_freed);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear);
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);
+void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
+ uint64_t *gtt_mem, uint64_t *cpu_mem);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
#include "amdgpu_atomfirmware.h"
#include "atom.h"
+ struct amdgpu_vram_reservation {
+ struct list_head node;
+ struct drm_mm_node mm_node;
+ };
+
static inline struct amdgpu_vram_mgr *
to_vram_mgr(struct ttm_resource_manager *man)
{
pages_per_node = 2UL << (20UL - PAGE_SHIFT);
#endif
pages_per_node = max_t(uint32_t, pages_per_node,
- mem->page_alignment);
+ tbo->page_alignment);
num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
}
i = 0;
spin_lock(&mgr->lock);
while (pages_left) {
- uint32_t alignment = mem->page_alignment;
+ uint32_t alignment = tbo->page_alignment;
if (pages >= pages_per_node)
alignment = pages_per_node;
}
spin_unlock(&mgr->lock);
- atomic64_add(vis_usage, &mgr->vis_usage);
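+ /* a single mm_node (i == 1) means the allocation is physically contiguous */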
+ if (i == 1)
+ mem->placement |= TTM_PL_FLAG_CONTIGUOUS;
+ atomic64_add(vis_usage, &mgr->vis_usage);
mem->mm_node = nodes;
-
return 0;
error:
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
+ #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
+ MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_BEIGE_GOBY:
case CHIP_VANGOGH:
return 0;
case CHIP_NAVI12:
dmub_asic = DMUB_ASIC_DCN302;
fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
break;
+ case CHIP_BEIGE_GOBY:
+ dmub_asic = DMUB_ASIC_DCN303;
+ fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
+ break;
default:
/* ASIC doesn't support DMUB. */
return ret;
}
- #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
- amdgpu_dm_crtc_secure_display_suspend(adev);
- #endif
WARN_ON(adev->dm.cached_state);
adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
dm->cached_state = NULL;
- #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
- amdgpu_dm_crtc_secure_display_resume(adev);
- #endif
-
amdgpu_dm_irq_resume_late(adev);
amdgpu_dm_smu_write_watermarks_table(adev);
max - min);
}
- static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+ u32 user_brightness)
{
- struct amdgpu_display_manager *dm = bl_get_data(bd);
struct amdgpu_dm_backlight_caps caps;
struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
- u32 brightness;
+ u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
bool rc;
int i;
amdgpu_dm_update_backlight_caps(dm);
caps = dm->backlight_caps;
- for (i = 0; i < dm->num_of_edps; i++)
+ for (i = 0; i < dm->num_of_edps; i++) {
+ dm->brightness[i] = user_brightness;
+ brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
link[i] = (struct dc_link *)dm->backlight_link[i];
+ }
- brightness = convert_brightness_from_user(&caps, bd->props.brightness);
- // Change brightness based on AUX property
+ /* Change brightness based on AUX property */
if (caps.aux_support) {
for (i = 0; i < dm->num_of_edps; i++) {
- rc = dc_link_set_backlight_level_nits(link[i], true, brightness,
+ rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
AUX_BL_DEFAULT_TRANSITION_TIME_MS);
if (!rc) {
DRM_ERROR("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
}
} else {
for (i = 0; i < dm->num_of_edps; i++) {
- rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness, 0);
+ rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
if (!rc) {
DRM_ERROR("DM: Failed to update backlight on eDP[%d]\n", i);
break;
return rc ? 0 : 1;
}
- static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
+
+ amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
+
+ return 0;
+ }
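+ /*
+  * Note: splitting amdgpu_dm_backlight_set_level() out of the backlight-ops
+  * callback lets the atomic commit path restore the cached level directly
+  * (see the amdgpu_dm_backlight_set_level(dm, dm->brightness[0]) hunk
+  * below) without going through the backlight device.
+  */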
+
+ static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
+ {
struct amdgpu_dm_backlight_caps caps;
amdgpu_dm_update_backlight_caps(dm);
rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
if (!rc)
- return bd->props.brightness;
+ return dm->brightness[0];
return convert_brightness_to_user(&caps, avg);
} else {
int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
if (ret == DC_ERROR_UNEXPECTED)
- return bd->props.brightness;
+ return dm->brightness[0];
return convert_brightness_to_user(&caps, ret);
}
}
+ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+ {
+ struct amdgpu_display_manager *dm = bl_get_data(bd);
+
+ return amdgpu_dm_backlight_get_level(dm);
+ }
+
static const struct backlight_ops amdgpu_dm_backlight_ops = {
.options = BL_CORE_SUSPENDRESUME,
.get_brightness = amdgpu_dm_backlight_get_brightness,
{
char bl_name[16];
struct backlight_properties props = { 0 };
+ int i;
amdgpu_dm_update_backlight_caps(dm);
+ for (i = 0; i < dm->num_of_edps; i++)
+ dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
props.max_brightness = AMDGPU_MAX_BL_LEVEL;
props.brightness = AMDGPU_MAX_BL_LEVEL;
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_BEIGE_GOBY:
case CHIP_VANGOGH:
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5;
break;
+ case CHIP_BEIGE_GOBY:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 2;
+ adev->mode_info.num_dig = 2;
+ break;
#endif
default:
DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
if (adev->asic_type == CHIP_SIENNA_CICHLID ||
adev->asic_type == CHIP_NAVY_FLOUNDER ||
adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
+ adev->asic_type == CHIP_BEIGE_GOBY ||
adev->asic_type == CHIP_VANGOGH)
tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}
return 0;
}
-static bool
-is_hdr_metadata_different(const struct drm_connector_state *old_state,
- const struct drm_connector_state *new_state)
-{
- struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
- struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
-
- if (old_blob != new_blob) {
- if (old_blob && new_blob &&
- old_blob->length == new_blob->length)
- return memcmp(old_blob->data, new_blob->data,
- old_blob->length);
-
- return true;
- }
-
- return false;
-}
-
static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
struct drm_atomic_state *state)
if (!crtc)
return 0;
- if (is_hdr_metadata_different(old_con_state, new_con_state)) {
+ if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
struct dc_info_packet hdr_infopacket;
ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector_type == DRM_MODE_CONNECTOR_eDP) {
- drm_object_attach_property(
- &aconnector->base.base,
- dm->ddev->mode_config.hdr_output_metadata_property, 0);
+ drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
if (!aconnector->mst_port)
drm_connector_attach_vrr_capable_property(&aconnector->base);
dm_old_crtc_state->abm_level;
hdr_changed =
- is_hdr_metadata_different(old_con_state, new_con_state);
+ !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
if (!scaling_changed && !abm_changed && !hdr_changed)
continue;
#ifdef CONFIG_DEBUG_FS
bool configure_crc = false;
enum amdgpu_dm_pipe_crc_source cur_crc_src;
+ #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
+ #endif
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ cur_crc_src = acrtc->dm_irq_params.crc_src;
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
#endif
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
* settings for the stream.
*/
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
- cur_crc_src = acrtc->dm_irq_params.crc_src;
- spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
configure_crc = true;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- if (amdgpu_dm_crc_window_is_activated(crtc))
- configure_crc = false;
+ if (amdgpu_dm_crc_window_is_activated(crtc)) {
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ acrtc->dm_irq_params.crc_window.update_win = true;
+ acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
+ spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
+ crc_rd_wrk->crtc = crtc;
+ spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+ }
#endif
}
if (configure_crc)
- amdgpu_dm_crtc_configure_crc_source(
- crtc, dm_new_crtc_state, cur_crc_src);
+ if (amdgpu_dm_crtc_configure_crc_source(
+ crtc, dm_new_crtc_state, cur_crc_src))
+ DRM_DEBUG_DRIVER("Failed to configure crc source\n");
#endif
}
}
/* Update audio instances for each connector. */
amdgpu_dm_commit_audio(dev, state);
+ #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+ /* restore the backlight level */
+ if (dm->backlight_dev)
+ amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
+ #endif
/*
* send vblank event on all events not handled in flip and
* mark consumed event for drm_atomic_helper_commit_hw_done
return get_cr_failure(lane_count, dpcd_lane_status);
}
- static inline enum link_training_result perform_link_training_int(
+ static inline enum link_training_result dp_transition_to_video_idle(
struct dc_link *link,
struct link_training_settings *lt_settings,
enum link_training_result status)
return status;
}
- static void initialize_training_settings(
+ static inline void decide_8b_10b_training_settings(
struct dc_link *link,
const struct dc_link_settings *link_setting,
const struct dc_link_training_overrides *overrides,
else
lt_settings->link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+ lt_settings->lttpr_mode = link->lttpr_mode;
+
/* Initialize lane settings overrides */
if (overrides->voltage_swing != NULL)
lt_settings->voltage_swing = overrides->voltage_swing;
lt_settings->enhanced_framing = 1;
}
+ static void decide_training_settings(
+ struct dc_link *link,
+ const struct dc_link_settings *link_settings,
+ const struct dc_link_training_overrides *overrides,
+ struct link_training_settings *lt_settings)
+ {
+ if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING)
+ decide_8b_10b_training_settings(link, link_settings, overrides, lt_settings);
+ }
+
uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count)
{
switch (lttpr_repeater_count) {
{
uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
+ DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
core_link_write_dpcd(link,
DP_PHY_REPEATER_MODE,
(uint8_t *)&repeater_mode,
sizeof(repeater_mode));
}
- static void configure_lttpr_mode_non_transparent(struct dc_link *link)
+ static void configure_lttpr_mode_non_transparent(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings)
{
/* aux timeout is already set to extended */
/* RESET/SET lttpr mode to enable non transparent mode */
enum dc_status result = DC_ERROR_UNEXPECTED;
uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
- DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
- result = core_link_write_dpcd(link,
- DP_PHY_REPEATER_MODE,
- (uint8_t *)&repeater_mode,
- sizeof(repeater_mode));
+ enum dp_link_encoding encoding = dp_get_link_encoding_format(&lt_settings->link_settings);
+
+ if (encoding == DP_8b_10b_ENCODING) {
+ DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
+ result = core_link_write_dpcd(link,
+ DP_PHY_REPEATER_MODE,
+ (uint8_t *)&repeater_mode,
+ sizeof(repeater_mode));
+
+ }
if (result == DC_OK) {
link->dpcd_caps.lttpr_caps.mode = repeater_mode;
link->dpcd_caps.lttpr_caps.mode = repeater_mode;
}
- repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
-
- for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
- aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1));
- core_link_read_dpcd(
- link,
- aux_interval_address,
- (uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1],
- sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1]));
- link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F;
+ if (encoding == DP_8b_10b_ENCODING) {
+ repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
+ aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1));
+ core_link_read_dpcd(
+ link,
+ aux_interval_address,
+ (uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1],
+ sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1]));
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F;
+ }
}
}
}
{
struct link_training_settings lt_settings;
- initialize_training_settings(
+ decide_training_settings(
link,
link_setting,
&link->preferred_training_settings,
uint8_t repeater_cnt;
uint8_t repeater_id;
- initialize_training_settings(
+ decide_training_settings(
link,
link_setting,
&link->preferred_training_settings,
/* Configure lttpr mode */
if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
- configure_lttpr_mode_non_transparent(link);
+ configure_lttpr_mode_non_transparent(link, &lt_settings);
else if (link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
configure_lttpr_mode_transparent(link);
/* 3. set training not in progress*/
dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) {
- status = perform_link_training_int(link,
+ status = dp_transition_to_video_idle(link,
&lt_settings,
status);
}
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
bool fec_enable = false;
- initialize_training_settings(
+ decide_training_settings(
link,
link_settings,
lt_overrides,
return true;
}
+bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
+{
+ if (!max_link_enc_cap) {
+ DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__);
+ return false;
+ }
+
+ if (link->link_enc->funcs->get_max_link_cap) {
+ link->link_enc->funcs->get_max_link_cap(link->link_enc, max_link_enc_cap);
+ return true;
+ }
+
+ DC_LOG_ERROR("%s: Max link encoder caps unknown", __func__);
+ max_link_enc_cap->lane_count = 1;
+ max_link_enc_cap->link_rate = 6;
+ return false;
+}
+
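/*
 * Caller sketch, illustration only (names are assumptions): clamp a
 * requested link setting to what the link encoder can drive, using the new
 * dc_link_dp_get_max_link_enc_cap() above.
 */
static void example_clamp_to_enc_cap(const struct dc_link *link,
				     struct dc_link_settings *setting)
{
	struct dc_link_settings enc_cap = {0};

	if (!dc_link_dp_get_max_link_enc_cap(link, &enc_cap))
		return; /* caps unknown; leave the request untouched */

	setting->lane_count = min(setting->lane_count, enc_cap.lane_count);
	setting->link_rate = min(setting->link_rate, enc_cap.link_rate);
}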
static struct dc_link_settings get_max_link_cap(struct dc_link *link)
{
struct dc_link_settings max_link_cap = {0};
struct dc_link_settings current_link_setting;
uint32_t link_bw;
- if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14 ||
+ /*
+ * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+ */
+ if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
link->dpcd_caps.edp_supported_link_rates_count == 0) {
*link_setting = link->verified_link_cap;
return true;
union phy_test_pattern dpcd_test_pattern;
union lane_adjust dpcd_lane_adjustment[2];
unsigned char dpcd_post_cursor_2_adjustment = 0;
- unsigned char test_80_bit_pattern[
+ unsigned char test_pattern_buffer[
(DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0};
+ unsigned int test_pattern_size = 0;
enum dp_test_pattern test_pattern;
struct dc_link_training_settings link_settings;
union lane_adjust dpcd_lane_adjust;
break;
}
- if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM)
+ if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
+ test_pattern_size = (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
+ DP_TEST_80BIT_CUSTOM_PATTERN_7_0) + 1;
core_link_read_dpcd(
link,
DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
- test_80_bit_pattern,
- sizeof(test_80_bit_pattern));
+ test_pattern_buffer,
+ test_pattern_size);
+ }
/* prepare link training settings */
link_settings.link = link->cur_link_settings;
test_pattern,
DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED,
&link_training_settings,
- test_80_bit_pattern,
- (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
- DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1);
+ test_pattern_buffer,
+ test_pattern_size);
}
static void dp_test_send_link_test_pattern(struct dc_link *link)
link->dpcd_caps.edp_supported_link_rates_count = 0;
memset(supported_link_rates, 0, sizeof(supported_link_rates));
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
+ /*
+ * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+ */
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
(link->dc->debug.optimize_edp_link_rate ||
link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
// Read DPCD 00010h - 0001Fh (16 bytes) in one shot
return false;
}
+ enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings)
+ {
+ if ((link_settings->link_rate >= LINK_RATE_LOW) &&
+ (link_settings->link_rate <= LINK_RATE_HIGH3))
+ return DP_8b_10b_ENCODING;
+ return DP_UNKNOWN_ENCODING;
+ }
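/*
 * Usage sketch, illustration only: callers gate 8b/10b-specific behaviour
 * on the reported encoding, as decide_training_settings() and
 * configure_lttpr_mode_non_transparent() do above.
 */
static bool example_uses_8b_10b(const struct dc_link_settings *settings)
{
	return dp_get_link_encoding_format(settings) == DP_8b_10b_ENCODING;
}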