	baser->val = its_read_baser(its, baser);
}
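+
+/*
+ * Size the Device table: pick the allocation order needed to cover the
+ * ITS device ID range, never dropping below the order already chosen
+ * from the default ITS page size.
+ */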
+static void its_parse_baser_device(struct its_node *its, struct its_baser *baser,
+				   u32 *order)
+{
+	u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser));
+	u32 ids = its->device_ids;
+	u32 new_order = *order;
+
+	/*
+	 * Allocate as many entries as required to fit the
+	 * range of device IDs that the ITS can grok... The ID
+	 * space being incredibly sparse, this results in a
+	 * massive waste of memory.
+	 */
+	new_order = max_t(u32, get_order(esz << ids), new_order);
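+	/*
+	 * If the resulting order is more than the page allocator can
+	 * handle, clamp it to MAX_ORDER - 1 and shrink the usable
+	 * device ID range to whatever still fits in that allocation.
+	 */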
+	if (new_order >= MAX_ORDER) {
+		new_order = MAX_ORDER - 1;
+		ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / esz);
+		pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n",
+			&its->phys_base, its->device_ids, ids);
+	}
+
+	*order = new_order;
+}
+
static void its_free_tables(struct its_node *its)
{
	int i;
		if (type == GITS_BASER_TYPE_NONE)
			continue;
-		/*
-		 * Allocate as many entries as required to fit the
-		 * range of device IDs that the ITS can grok... The ID
-		 * space being incredibly sparse, this results in a
-		 * massive waste of memory.
-		 *
-		 * For other tables, only allocate a single page.
-		 */
-		if (type == GITS_BASER_TYPE_DEVICE) {
-			/*
-			 * 'order' was initialized earlier to the default page
-			 * granule of the ITS. We can't have an allocation
-			 * smaller than that. If the requested allocation
-			 * is smaller, round up to the default page granule.
-			 */
-			order = max(get_order((1UL << ids) * entry_size),
-				    order);
-			if (order >= MAX_ORDER) {
-				order = MAX_ORDER - 1;
-				pr_warn("%s: Device Table too large, reduce its page order to %u\n",
-					node_name, order);
-			}
-		}
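+		/*
+		 * Only the Device table is sized from the device ID range;
+		 * all other tables keep the single-page default set up above.
+		 */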
+		if (type == GITS_BASER_TYPE_DEVICE)
+			its_parse_baser_device(its, baser, &order);
retry_alloc_baser:
		alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);