return ret;
}
-static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
- uint32_t max_length)
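+/*
+ * Compute the bad page retirement threshold: the amdgpu_bad_page_threshold
+ * module parameter if set, otherwise a default derived from the VRAM size,
+ * capped by the EEPROM bad page record capacity.
+ */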
+static uint32_t
+amdgpu_ras_calculate_badpages_threshold(struct amdgpu_device *adev)
{
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
int tmp_threshold = amdgpu_bad_page_threshold;
u64 val;
+	uint32_t max_length = 0;
+
+	max_length = amdgpu_ras_eeprom_get_record_max_length();
/*
* Justification of value bad_page_cnt_threshold in ras structure
*
tmp_threshold = max_length;
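+	/* -1 selects the default policy: derive the threshold from the
+	 * VRAM size (val / RAS_BAD_PAGE_RATE), capped at max_length.
+	 */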
if (tmp_threshold == -1) {
- val = adev->gmc.mc_vram_size;
+ val = adev->gmc.real_vram_size;
do_div(val, RAS_BAD_PAGE_RATE);
- con->bad_page_cnt_threshold = min(lower_32_bits(val),
- max_length);
- } else {
- con->bad_page_cnt_threshold = tmp_threshold;
+ tmp_threshold = min(lower_32_bits(val), max_length);
}
+
+ return tmp_threshold;
}
int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data **data;
- uint32_t max_eeprom_records_len = 0;
bool exc_err_limit = false;
int ret;
atomic_set(&con->in_recovery, 0);
con->adev = adev;
- max_eeprom_records_len = amdgpu_ras_eeprom_get_record_max_length();
- amdgpu_ras_validate_threshold(adev, max_eeprom_records_len);
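+	/* On first init, size the threshold and reserve an equal number of
+	 * backup pages so each retired page can be replaced one for one.
+	 */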
+ if (!con->bad_page_cnt_threshold) {
+ con->bad_page_cnt_threshold =
+			amdgpu_ras_calculate_badpages_threshold(adev);
+
+ ret = amdgpu_vram_mgr_reserve_backup_pages(
+ ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
+ con->bad_page_cnt_threshold);
+ if (ret)
+ goto out;
+ }
ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
/*
#include "amdgpu_atomfirmware.h"
#include "atom.h"
+static int amdgpu_vram_mgr_free_backup_pages(struct amdgpu_vram_mgr *mgr,
+ uint32_t num_pages);
+
static inline struct amdgpu_vram_mgr *to_vram_mgr(struct ttm_resource_manager *man)
{
return container_of(man, struct amdgpu_vram_mgr, manager);
spin_lock_init(&mgr->lock);
INIT_LIST_HEAD(&mgr->reservations_pending);
INIT_LIST_HEAD(&mgr->reserved_pages);
+ INIT_LIST_HEAD(&mgr->backup_pages);
/* Add the two VRAM-related sysfs files */
ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
drm_mm_remove_node(&rsv->mm_node);
kfree(rsv);
}
+
+ list_for_each_entry_safe(rsv, temp, &mgr->backup_pages, node) {
+ drm_mm_remove_node(&rsv->mm_node);
+ kfree(rsv);
+ }
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
continue;
dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
- rsv->mm_node.start, rsv->mm_node.size);
+ rsv->mm_node.start << PAGE_SHIFT, rsv->mm_node.size);
vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
atomic64_add(vis_usage, &mgr->vis_usage);
atomic64_add(rsv->mm_node.size << PAGE_SHIFT, &mgr->usage);
list_move(&rsv->node, &mgr->reserved_pages);
+
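+		/* A bad page was just reserved; release one backup page per
+		 * retired page so the usable VRAM size stays constant.
+		 */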
+ amdgpu_vram_mgr_free_backup_pages(mgr, rsv->mm_node.size);
}
}
uint64_t start, uint64_t size)
{
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+ struct amdgpu_device *adev = to_amdgpu_device(mgr);
struct amdgpu_vram_reservation *rsv;
rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
rsv->mm_node.start = start >> PAGE_SHIFT;
rsv->mm_node.size = size >> PAGE_SHIFT;
+ dev_dbg(adev->dev, "Pending Reservation: 0x%llx\n", start);
+
spin_lock(&mgr->lock);
- list_add_tail(&mgr->reservations_pending, &rsv->node);
+ list_add_tail(&rsv->node, &mgr->reservations_pending);
amdgpu_vram_mgr_do_reserve(man);
spin_unlock(&mgr->lock);
return 0;
}
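+/**
+ * amdgpu_vram_mgr_free_backup_pages - release reserved backup pages
+ *
+ * @mgr: amdgpu_vram_mgr pointer
+ * @num_pages: number of backup pages to release
+ *
+ * Remove @num_pages single-page nodes from the backup list and return
+ * their space to the VRAM manager. Caller must hold @mgr->lock.
+ */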
+static int amdgpu_vram_mgr_free_backup_pages(struct amdgpu_vram_mgr *mgr,
+ uint32_t num_pages)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(mgr);
+ struct amdgpu_vram_reservation *rsv;
+ uint32_t i;
+ uint64_t vis_usage = 0, total_usage = 0;
+
+ if (num_pages > mgr->num_backup_pages) {
+		dev_warn(adev->dev, "Not enough backup pages\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_pages; i++) {
+ rsv = list_first_entry(&mgr->backup_pages,
+ struct amdgpu_vram_reservation, node);
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
+ total_usage += (rsv->mm_node.size << PAGE_SHIFT);
+ drm_mm_remove_node(&rsv->mm_node);
+ list_del(&rsv->node);
+ kfree(rsv);
+ mgr->num_backup_pages--;
+ }
+
+ atomic64_sub(total_usage, &mgr->usage);
+ atomic64_sub(vis_usage, &mgr->vis_usage);
+
+ return 0;
+}
+
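+/**
+ * amdgpu_vram_mgr_reserve_backup_pages - reserve backup pages for bad pages
+ *
+ * @man: TTM memory type manager
+ * @num_pages: number of single pages to reserve
+ *
+ * Carve @num_pages one-page nodes out of VRAM to stand in for pages that
+ * are later retired as bad. On failure, everything reserved so far is
+ * released again.
+ *
+ * Returns:
+ * 0 on success, a negative errno on failure.
+ */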
+int amdgpu_vram_mgr_reserve_backup_pages(struct ttm_resource_manager *man,
+ uint32_t num_pages)
+{
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+ struct amdgpu_device *adev = to_amdgpu_device(mgr);
+ struct amdgpu_vram_reservation *rsv;
+ struct drm_mm *mm = &mgr->mm;
+ uint32_t i;
+ int ret = 0;
+ uint64_t vis_usage, total_usage;
+
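+	/* Reserve one page at a time so each backup is an independent
+	 * single-page node that can be freed as bad pages are retired.
+	 */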
+ for (i = 0; i < num_pages; i++) {
+ rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
+ if (!rsv) {
+ ret = -ENOMEM;
+ goto pro_end;
+ }
+
+ INIT_LIST_HEAD(&rsv->node);
+
+		/* drm_mm is protected by mgr->lock elsewhere in this file,
+		 * so hold it across the node insertion as well.
+		 */
+		spin_lock(&mgr->lock);
+		ret = drm_mm_insert_node(mm, &rsv->mm_node, 1);
+		if (ret) {
+			spin_unlock(&mgr->lock);
+			dev_err(adev->dev, "failed to reserve backup page %d, ret 0x%x\n", i, ret);
+			kfree(rsv);
+			goto pro_end;
+		}
+
+		vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
+		total_usage = (rsv->mm_node.size << PAGE_SHIFT);
+
+		atomic64_add(vis_usage, &mgr->vis_usage);
+		atomic64_add(total_usage, &mgr->usage);
+		list_add_tail(&rsv->node, &mgr->backup_pages);
+		mgr->num_backup_pages++;
+		spin_unlock(&mgr->lock);
+ }
+
+pro_end:
+ if (ret) {
+ spin_lock(&mgr->lock);
+ amdgpu_vram_mgr_free_backup_pages(mgr, mgr->num_backup_pages);
+ spin_unlock(&mgr->lock);
+ }
+
+ return ret;
+}
+
/**
* amdgpu_vram_mgr_query_page_status - query the reservation status
*