#ifndef CONFIG_PRESERVE_FA_DUMP
static DEFINE_MUTEX(fadump_mutex);
-struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0 };
-struct fadump_mrange_info reserved_mrange_info = { "reserved", NULL, 0, 0, 0 };
+struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false };
+
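+/*
+ * Reserved ranges are scanned from the flattened device tree early in boot,
+ * so keep them in a statically allocated array instead of resizing the list
+ * at runtime.
+ */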
+#define RESERVED_RNGS_SZ	16384 /* 16K - 1024 entries */
+#define RESERVED_RNGS_CNT	(RESERVED_RNGS_SZ / \
+				 sizeof(struct fadump_memory_range))
+static struct fadump_memory_range rngs[RESERVED_RNGS_CNT];
+struct fadump_mrange_info reserved_mrange_info = { "reserved", rngs,
+						   RESERVED_RNGS_SZ, 0,
+						   RESERVED_RNGS_CNT, true };
+
+static void __init early_init_dt_scan_reserved_ranges(unsigned long node);
#ifdef CONFIG_CMA
static struct cma *fadump_cma;
int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
				      int depth, void *data)
{
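+	/* The reserved-ranges property lives in the root node, i.e. depth 0 */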
+	if (depth == 0) {
+		early_init_dt_scan_reserved_ranges(node);
+		return 0;
+	}
+
	if (depth != 1)
		return 0;
static void fadump_free_mem_ranges(struct fadump_mrange_info *mrange_info)
{
+	if (mrange_info->is_static) {
+		mrange_info->mem_range_cnt = 0;
+		return;
+	}
+
	kfree(mrange_info->mem_ranges);
-	mrange_info->mem_ranges = NULL;
-	mrange_info->mem_ranges_sz = 0;
-	mrange_info->max_mem_ranges = 0;
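+	/* Clear every field that follows name[], the first RNG_NAME_SZ bytes */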
+	memset((void *)((u64)mrange_info + RNG_NAME_SZ), 0,
+	       (sizeof(struct fadump_mrange_info) - RNG_NAME_SZ));
}
/*
	if (mrange_info->mem_range_cnt == mrange_info->max_mem_ranges) {
		int ret;
+		if (mrange_info->is_static) {
+			pr_err("Reached array size limit for %s memory ranges\n",
+			       mrange_info->name);
+			return -ENOSPC;
+		}
+
		ret = fadump_alloc_mem_ranges(mrange_info);
		if (ret)
			return ret;
* Scan reserved-ranges to consider them while reserving/releasing
* memory for FADump.
*/
-static inline int fadump_scan_reserved_mem_ranges(void)
+static void __init early_init_dt_scan_reserved_ranges(unsigned long node)
{
-	struct device_node *root;
	const __be32 *prop;
	int len, ret = -1;
	unsigned long i;
-	root = of_find_node_by_path("/");
-	if (!root)
-		return ret;
+	/* reserved-ranges already scanned */
+	if (reserved_mrange_info.mem_range_cnt != 0)
+		return;
-	prop = of_get_property(root, "reserved-ranges", &len);
+	prop = of_get_flat_dt_prop(node, "reserved-ranges", &len);
	if (!prop)
-		return ret;
+		return;
	/*
	 * Each reserved range is an (address,size) pair, 2 cells each,
		}
	}
-	return ret;
+	/* Compact reserved ranges */
+	sort_and_merge_mem_ranges(&reserved_mrange_info);
}
/*
	u64 ra_start, ra_end, tstart;
	int i, ret;
-	fadump_scan_reserved_mem_ranges();
-
	ra_start = fw_dump.reserve_dump_area_start;
	ra_end = ra_start + fw_dump.reserve_dump_area_size;
	/*
-	 * Add reserved dump area to reserved ranges list
-	 * and exclude all these ranges while releasing memory.
+	 * If reserved ranges array limit is hit, overwrite the last reserved
+	 * memory range with reserved dump area to ensure it is excluded from
+	 * the memory being released (reused for next FADump registration).
	 */
-	ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
-	if (ret != 0) {
-		/*
-		 * Not enough memory to setup reserved ranges but the system is
-		 * running shortage of memory. So, release all the memory except
-		 * Reserved dump area (reused for next fadump registration).
-		 */
-		if (begin < ra_end && end > ra_start) {
-			if (begin < ra_start)
-				fadump_release_reserved_area(begin, ra_start);
-			if (end > ra_end)
-				fadump_release_reserved_area(ra_end, end);
-		} else
-			fadump_release_reserved_area(begin, end);
+	if (reserved_mrange_info.mem_range_cnt ==
+	    reserved_mrange_info.max_mem_ranges)
+		reserved_mrange_info.mem_range_cnt--;
+	ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
+	if (ret != 0)
		return;
-	}
	/* Get the reserved ranges list in order first. */
	sort_and_merge_mem_ranges(&reserved_mrange_info);