2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
7 * Anup Patel <anup.patel@wdc.com>
10 #include <sbi/riscv_asm.h>
11 #include <sbi/sbi_console.h>
12 #include <sbi/sbi_domain.h>
13 #include <sbi/sbi_hartmask.h>
14 #include <sbi/sbi_hsm.h>
15 #include <sbi/sbi_math.h>
16 #include <sbi/sbi_platform.h>
17 #include <sbi/sbi_scratch.h>
18 #include <sbi/sbi_string.h>
21 * We allocate an extra element because sbi_domain_for_each() expects
22 * the array to be null-terminated.
/* Lookup table: domain index -> domain pointer; one extra NULL slot so
 * sbi_domain_for_each() can stop at the terminator (see comment above). */
24 struct sbi_domain *domidx_to_domain_table[SBI_DOMAIN_MAX_INDEX + 1] = { 0 };
/* Lookup table: HART id -> domain currently owning that HART */
25 struct sbi_domain *hartid_to_domain_table[SBI_HARTMASK_MAX_BITS] = { 0 };
/* Number of domains registered so far (assigned as indexes) */
26 static u32 domain_count = 0;
/* Once true, no further domain registration or root-region changes allowed */
27 static bool domain_finalized = false;
/* Possible-HART mask of the root (default) domain */
29 static struct sbi_hartmask root_hmask = { 0 };
/* Maximum number of memory regions the root domain can hold */
31 #define ROOT_REGION_MAX 16
32 static u32 root_memregs_count = 0;
/* Root domain regions; +1 slot for the order==0 end-of-array sentinel */
33 static struct sbi_domain_memregion root_memregs[ROOT_REGION_MAX + 1] = { 0 };
/* The root/default domain; remaining fields are filled by sbi_domain_init() */
35 struct sbi_domain root = {
37 .possible_harts = &root_hmask,
38 .regions = root_memregs,
39 .system_reset_allowed = true,
40 .system_suspend_allowed = true,
/* firmware regions are added later, during sbi_domain_init() */
41 .fw_region_inited = false,
/**
 * Check whether a HART is currently assigned to the given domain.
 * Simply tests hartid in dom->assigned_harts.
 */
44 bool sbi_domain_is_assigned_hart(const struct sbi_domain *dom, u32 hartid)
47 	return sbi_hartmask_test_hart(hartid, &dom->assigned_harts);
/**
 * Return one machine word of the domain's assigned-HART bitmap, with bit 0
 * of the result corresponding to HART id hbase (parameter on an elided line).
 */
52 ulong sbi_domain_get_assigned_hartmask(const struct sbi_domain *dom,
55 	ulong ret, bword, boff;
/* Locate the word and the bit offset within that word for hbase */
60 	bword = BIT_WORD(hbase);
61 	boff = BIT_WORD_OFFSET(hbase);
/* Low part: shift the containing word down so hbase becomes bit 0 */
63 	ret = sbi_hartmask_bits(&dom->assigned_harts)[bword++] >> boff;
/* High part: if hbase is not word-aligned, fold in the low boff bits of
 * the next word (when one exists) to fill the top of the result. */
64 	if (boff && bword < BIT_WORD(SBI_HARTMASK_MAX_BITS)) {
65 		ret |= (sbi_hartmask_bits(&dom->assigned_harts)[bword] &
66 			(BIT(boff) - 1UL)) << (BITS_PER_LONG - boff);
/**
 * Initialize a memory region descriptor (*reg) as the smallest
 * naturally-aligned power-of-two region that fully covers
 * [addr, addr + size). Flags parameter is on an elided line.
 */
72 void sbi_domain_memregion_init(unsigned long addr,
75 			       struct sbi_domain_memregion *reg)
77 	unsigned long base = 0, order;
/* Search orders upward from the minimum that could fit 'size'; an order
 * of __riscv_xlen means the region spans the whole address space. */
79 	for (order = log2roundup(size) ; order <= __riscv_xlen; order++) {
80 		if (order < __riscv_xlen) {
/* Candidate base: addr rounded down to this order's alignment */
81 			base = addr & ~((1UL << order) - 1UL);
/* Accept only if [addr, addr+size-1] lies entirely within
 * [base, base + 2^order). (Leading condition on an elided line.) */
83 			    (addr < (base + (1UL << order))) &&
84 			    (base <= (addr + size - 1UL)) &&
85 			    ((addr + size - 1UL) < (base + (1UL << order))))
/**
 * Check whether an access of type access_flags (R/W/X and optionally MMIO)
 * at address addr is permitted for privilege level 'mode' under the given
 * domain's memory regions.
 *
 * Returns true when the first (highest-priority, since regions are sorted)
 * region containing addr grants all requested permissions; if no region
 * matches, access is allowed only for M-mode.
 */
101 bool sbi_domain_check_addr(const struct sbi_domain *dom,
102 			   unsigned long addr, unsigned long mode,
103 			   unsigned long access_flags)
105 	bool rmmio, mmio = false;
106 	struct sbi_domain_memregion *reg;
107 	unsigned long rstart, rend, rflags, rwx = 0, rrwx = 0;
/* Build the requested-permission mask in M-bit positions; SU bits sit at
 * the same relative offsets once shifted (see comment below). */
113 	 * Use M_{R/W/X} bits because the SU-bits are at the
114 	 * same relative offsets. If the mode is not M, the SU
115 	 * bits will fall at same offsets after the shift.
117 	if (access_flags & SBI_DOMAIN_READ)
118 		rwx |= SBI_DOMAIN_MEMREGION_M_READABLE;
120 	if (access_flags & SBI_DOMAIN_WRITE)
121 		rwx |= SBI_DOMAIN_MEMREGION_M_WRITABLE;
123 	if (access_flags & SBI_DOMAIN_EXECUTE)
124 		rwx |= SBI_DOMAIN_MEMREGION_M_EXECUTABLE;
/* Caller may require the target to be an MMIO region */
126 	if (access_flags & SBI_DOMAIN_MMIO)
129 	sbi_domain_for_each_memregion(dom, reg) {
/* Select the M or SU permission bits of this region depending on mode */
131 		rrwx = (mode == PRV_M ?
132 			(rflags & SBI_DOMAIN_MEMREGION_M_ACCESS_MASK) :
133 			(rflags & SBI_DOMAIN_MEMREGION_SU_ACCESS_MASK)
134 			>> SBI_DOMAIN_MEMREGION_SU_ACCESS_SHIFT);
/* Region end: full-xlen order means the region covers all addresses */
137 		rend = (reg->order < __riscv_xlen) ?
138 			rstart + ((1UL << reg->order) - 1) : -1UL;
139 		if (rstart <= addr && addr <= rend) {
140 			rmmio = (rflags & SBI_DOMAIN_MEMREGION_MMIO) ? true : false;
/* All requested permission bits must be granted by this region.
 * (MMIO-mismatch rejection appears on an elided line.) */
143 			return ((rrwx & rwx) == rwx) ? true : false;
/* No region matched: only M-mode may touch unlisted addresses */
147 	return (mode == PRV_M) ? true : false;
/* Check if region complies with constraints */
151 static bool is_region_valid(const struct sbi_domain_memregion *reg)
/* Order must be in [3, xlen]: minimum PMP granularity is 8 bytes (2^3) */
153 	if (reg->order < 3 || __riscv_xlen < reg->order)
/* A full-address-space region (order == xlen) must start at base 0 */
156 	if (reg->order == __riscv_xlen && reg->base != 0)
/* Base must be naturally aligned to the region size */
159 	if (reg->order < __riscv_xlen && (reg->base & (BIT(reg->order) - 1)))
/** Check if regionA is sub-region of regionB */
166 static bool is_region_subset(const struct sbi_domain_memregion *regA,
167 			     const struct sbi_domain_memregion *regB)
/* Inclusive start/end addresses of both regions */
169 	ulong regA_start = regA->base;
170 	ulong regA_end = regA->base + (BIT(regA->order) - 1);
171 	ulong regB_start = regB->base;
172 	ulong regB_end = regB->base + (BIT(regB->order) - 1);
/* A lies within B; strict '<' on the middle comparisons means an
 * identical region does NOT count as a subset of itself. */
174 	if ((regB_start <= regA_start) &&
175 	    (regA_start < regB_end) &&
176 	    (regB_start < regA_end) &&
177 	    (regA_end <= regB_end))
/** Check if regionA conflicts regionB */
184 static bool is_region_conflict(const struct sbi_domain_memregion *regA,
185 			       const struct sbi_domain_memregion *regB)
/* Conflict = one region contains the other AND both carry identical
 * flags (a redundant duplicate rather than a legitimate override). */
187 	if ((is_region_subset(regA, regB) || is_region_subset(regB, regA)) &&
188 	    regA->flags == regB->flags)
/** Check if regionA should be placed before regionB */
195 static bool is_region_before(const struct sbi_domain_memregion *regA,
196 			     const struct sbi_domain_memregion *regB)
/* Sort order: smaller regions first (higher match priority), ties
 * broken by lower base address. */
198 	if (regA->order < regB->order)
201 	if ((regA->order == regB->order) &&
202 	    (regA->base < regB->base))
/**
 * Find the first (highest-priority) region of the domain that contains
 * the given address; returns NULL if no region matches (return statements
 * are on elided lines).
 */
208 static const struct sbi_domain_memregion *find_region(
209 						const struct sbi_domain *dom,
212 	unsigned long rstart, rend;
213 	struct sbi_domain_memregion *reg;
215 	sbi_domain_for_each_memregion(dom, reg) {
/* Inclusive region end; full-xlen order covers the whole address space */
217 		rend = (reg->order < __riscv_xlen) ?
218 			rstart + ((1UL << reg->order) - 1) : -1UL;
219 		if (rstart <= addr && addr <= rend)
/**
 * Among the sub-regions of 'reg' whose base lies strictly above 'addr',
 * find the lowest one (ties broken by smaller order). Used by
 * sbi_domain_check_addr_range() to walk overlapping regions in address
 * order. Returns NULL when no such sub-region exists.
 */
226 static const struct sbi_domain_memregion *find_next_subset_region(
227 				const struct sbi_domain *dom,
228 				const struct sbi_domain_memregion *reg,
231 	struct sbi_domain_memregion *sreg, *ret = NULL;
233 	sbi_domain_for_each_memregion(dom, sreg) {
/* Skip reg itself, regions at/below addr, and non-subsets of reg */
234 		if (sreg == reg || (sreg->base <= addr) ||
235 		    !is_region_subset(sreg, reg))
/* Keep the candidate with the lowest base; prefer smaller order on tie */
238 		if (!ret || (sreg->base < ret->base) ||
239 		    ((sreg->base == ret->base) && (sreg->order < ret->order)))
/**
 * Validate and normalize a domain before registration:
 *  - possible-HART mask present and contains only platform-valid HARTs
 *  - region array present, each region valid, no conflicting duplicates
 *  - firmware region was initialized
 *  - regions sorted (selection-sort by is_region_before)
 *  - next boot stage mode is S or U, and next_addr is executable
 * Error paths (returns) are on elided lines.
 */
246 static int sanitize_domain(const struct sbi_platform *plat,
247 			   struct sbi_domain *dom)
250 	struct sbi_domain_memregion treg, *reg, *reg1;
252 	/* Check possible HARTs */
253 	if (!dom->possible_harts) {
254 		sbi_printf("%s: %s possible HART mask is NULL\n",
255 			   __func__, dom->name);
/* Every possible HART must be valid for this platform */
258 	sbi_hartmask_for_each_hart(i, dom->possible_harts) {
259 		if (sbi_platform_hart_invalid(plat, i)) {
260 			sbi_printf("%s: %s possible HART mask has invalid "
261 				   "hart %d\n", __func__, dom->name, i);
266 	/* Check memory regions */
268 		sbi_printf("%s: %s regions is NULL\n",
269 			   __func__, dom->name);
272 	sbi_domain_for_each_memregion(dom, reg) {
273 		if (!is_region_valid(reg)) {
274 			sbi_printf("%s: %s has invalid region base=0x%lx "
275 				   "order=%lu flags=0x%lx\n", __func__,
276 				   dom->name, reg->base, reg->order,
282 	/* Count memory regions */
284 	sbi_domain_for_each_memregion(dom, reg)
287 	/* Check presence of firmware regions */
288 	if (!dom->fw_region_inited) {
289 		sbi_printf("%s: %s does not have firmware region\n",
290 			   __func__, dom->name);
/* Sort regions so smaller (higher-priority) regions come first; also
 * reject conflicting duplicates found during the scan. */
294 	/* Sort the memory regions */
295 	for (i = 0; i < (count - 1); i++) {
296 		reg = &dom->regions[i];
297 		for (j = i + 1; j < count; j++) {
298 			reg1 = &dom->regions[j];
300 			if (is_region_conflict(reg1, reg)) {
301 				sbi_printf("%s: %s conflict between regions "
302 					"(base=0x%lx order=%lu flags=0x%lx) and "
303 					"(base=0x%lx order=%lu flags=0x%lx)\n",
305 					reg->base, reg->order, reg->flags,
306 					reg1->base, reg1->order, reg1->flags);
310 			if (!is_region_before(reg1, reg))
/* Swap reg and reg1 via temporary */
313 			sbi_memcpy(&treg, reg1, sizeof(treg));
314 			sbi_memcpy(reg1, reg, sizeof(treg));
315 			sbi_memcpy(reg, &treg, sizeof(treg));
320 	 * We don't need to check boot HART id of domain because if boot
321 	 * HART id is not possible/assigned to this domain then it won't
322 	 * be started at boot-time by sbi_domain_finalize().
328 	 * We only allow next mode to be S-mode or U-mode, so that we can
329 	 * protect M-mode context and enforce checks on memory accesses.
331 	if (dom->next_mode != PRV_S &&
332 	    dom->next_mode != PRV_U) {
333 		sbi_printf("%s: %s invalid next booting stage mode 0x%lx\n",
334 			   __func__, dom->name, dom->next_mode);
338 	/* Check next address and next mode */
339 	if (!sbi_domain_check_addr(dom, dom->next_addr, dom->next_mode,
340 				   SBI_DOMAIN_EXECUTE)) {
341 		sbi_printf("%s: %s next booting stage address 0x%lx can't "
342 			   "execute\n", __func__, dom->name, dom->next_addr);
/**
 * Check that every byte of [addr, addr + size) is accessible with
 * access_flags at privilege 'mode'. Walks the range region by region:
 * checks addr, then advances either to the next subset region's base or
 * past the end of the current region. Loop construct and returns are on
 * elided lines.
 */
349 bool sbi_domain_check_addr_range(const struct sbi_domain *dom,
350 				 unsigned long addr, unsigned long size,
352 				 unsigned long access_flags)
354 	unsigned long max = addr + size;
355 	const struct sbi_domain_memregion *reg, *sreg;
/* Find the region covering the current address */
361 		reg = find_region(dom, addr);
/* Per-address permission check at the current position */
365 		if (!sbi_domain_check_addr(dom, addr, mode, access_flags))
/* Next stop: either a higher-priority subset region starting above
 * addr, or the end of the current region. */
368 		sreg = find_next_subset_region(dom, reg, addr);
371 		else if (reg->order < __riscv_xlen)
372 			addr = reg->base + (1UL << reg->order);
/**
 * Print a human-readable description of a domain to the console:
 * name, boot HART, possible/assigned HARTs, every memory region with its
 * M-mode and SU-mode permission flags, next boot stage details, and
 * reset/suspend policy. 'suffix' is appended to each label for context.
 */
380 void sbi_domain_dump(const struct sbi_domain *dom, const char *suffix)
383 	unsigned long rstart, rend;
384 	struct sbi_domain_memregion *reg;
386 	sbi_printf("Domain%d Name        %s: %s\n",
387 		   dom->index, suffix, dom->name);
389 	sbi_printf("Domain%d Boot HART   %s: %d\n",
390 		   dom->index, suffix, dom->boot_hartid);
/* HART list: '*' marks HARTs currently assigned (vs merely possible) */
393 	sbi_printf("Domain%d HARTs       %s: ", dom->index, suffix);
394 	sbi_hartmask_for_each_hart(i, dom->possible_harts)
395 		sbi_printf("%s%d%s", (k++) ? "," : "",
396 			   i, sbi_domain_is_assigned_hart(dom, i) ? "*" : "");
400 	sbi_domain_for_each_memregion(dom, reg) {
402 		rend = (reg->order < __riscv_xlen) ?
403 			rstart + ((1UL << reg->order) - 1) : -1UL;
405 		sbi_printf("Domain%d Region%02d    %s: 0x%" PRILX "-0x%" PRILX " ",
406 			dom->index, i, suffix, rstart, rend);
/* M-mode flags: "(I,R,W,X)" — k makes the first flag print '(' and
 * subsequent ones a comma; empty set prints "()" */
411 		if (reg->flags & SBI_DOMAIN_MEMREGION_MMIO)
412 			sbi_printf("%cI", (k++) ? ',' : '(');
413 		if (reg->flags & SBI_DOMAIN_MEMREGION_M_READABLE)
414 			sbi_printf("%cR", (k++) ? ',' : '(');
415 		if (reg->flags & SBI_DOMAIN_MEMREGION_M_WRITABLE)
416 			sbi_printf("%cW", (k++) ? ',' : '(');
417 		if (reg->flags & SBI_DOMAIN_MEMREGION_M_EXECUTABLE)
418 			sbi_printf("%cX", (k++) ? ',' : '(');
419 		sbi_printf("%s ", (k++) ? ")" : "()");
/* SU-mode (S/U privilege) flags, same "(R,W,X)" format */
423 		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_READABLE)
424 			sbi_printf("%cR", (k++) ? ',' : '(');
425 		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_WRITABLE)
426 			sbi_printf("%cW", (k++) ? ',' : '(');
427 		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_EXECUTABLE)
428 			sbi_printf("%cX", (k++) ? ',' : '(');
429 		sbi_printf("%s\n", (k++) ? ")" : "()");
434 	sbi_printf("Domain%d Next Address%s: 0x%" PRILX "\n",
435 		   dom->index, suffix, dom->next_addr);
437 	sbi_printf("Domain%d Next Arg1   %s: 0x%" PRILX "\n",
438 		   dom->index, suffix, dom->next_arg1);
440 	sbi_printf("Domain%d Next Mode   %s: ", dom->index, suffix);
441 	switch (dom->next_mode) {
443 		sbi_printf("M-mode\n");
446 		sbi_printf("S-mode\n");
449 		sbi_printf("U-mode\n");
452 		sbi_printf("Unknown\n");
456 	sbi_printf("Domain%d SysReset    %s: %s\n",
457 		   dom->index, suffix, (dom->system_reset_allowed) ? "yes" : "no");
459 	sbi_printf("Domain%d SysSuspend  %s: %s\n",
460 		   dom->index, suffix, (dom->system_suspend_allowed) ? "yes" : "no");
/** Dump every registered domain via sbi_domain_dump(). */
463 void sbi_domain_dump_all(const char *suffix)
466 	const struct sbi_domain *dom;
468 	sbi_domain_for_each(i, dom) {
469 		sbi_domain_dump(dom, suffix);
/**
 * Register a new domain and assign it the HARTs in assign_mask.
 *
 * Steps: reject NULL args / post-finalize calls / duplicates; check
 * index capacity; sanitize the domain; allocate its index; then for
 * each HART in assign_mask that is also a possible HART, steal it from
 * its previous domain and record the new ownership. Error returns are
 * on elided lines.
 */
474 int sbi_domain_register(struct sbi_domain *dom,
475 			const struct sbi_hartmask *assign_mask)
479 	struct sbi_domain *tdom;
480 	u32 cold_hartid = current_hartid();
481 	const struct sbi_platform *plat = sbi_platform_thishart_ptr();
/* Registration is forbidden once domains are finalized */
484 	if (!dom || !assign_mask || domain_finalized)
487 	/* Check if domain already discovered */
488 	sbi_domain_for_each(i, tdom) {
494 	 * Ensure that we have room for Domain Index to
497 	if (SBI_DOMAIN_MAX_INDEX <= domain_count) {
498 		sbi_printf("%s: No room for %s\n",
499 			   __func__, dom->name);
503 	/* Sanitize discovered domain */
504 	rc = sanitize_domain(plat, dom);
506 		sbi_printf("%s: sanity checks failed for"
507 			   " %s (error %d)\n", __func__,
512 	/* Assign index to domain */
513 	dom->index = domain_count++;
514 	domidx_to_domain_table[dom->index] = dom;
516 	/* Clear assigned HARTs of domain */
517 	sbi_hartmask_clear_all(&dom->assigned_harts);
519 	/* Assign domain to HART if HART is a possible HART */
520 	sbi_hartmask_for_each_hart(i, assign_mask) {
521 		if (!sbi_hartmask_test_hart(i, dom->possible_harts))
/* Remove the HART from its current domain (if any), then claim it */
524 		tdom = hartid_to_domain_table[i];
526 			sbi_hartmask_clear_hart(i,
527 					&tdom->assigned_harts);
528 		hartid_to_domain_table[i] = dom;
529 		sbi_hartmask_set_hart(i, &dom->assigned_harts);
532 		 * If cold boot HART is assigned to this domain then
533 		 * override boot HART of this domain.
535 		if (i == cold_hartid &&
536 		    dom->boot_hartid != cold_hartid) {
537 			sbi_printf("Domain%d Boot HARTID forced to"
538 				   " %d\n", dom->index, cold_hartid);
539 			dom->boot_hartid = cold_hartid;
/**
 * Append a memory region to the root domain's region list.
 *
 * Rejects NULL regions, calls after finalization, a replaced region
 * array, or a full root_memregs table. After appending, re-sanitizes
 * the root domain (which sorts the regions) and then repeatedly merges
 * adjacent same-order, same-flag regions into one region of double the
 * size. Error returns and the merge-loop scaffolding are on elided lines.
 */
546 int sbi_domain_root_add_memregion(const struct sbi_domain_memregion *reg)
550 	struct sbi_domain_memregion *nreg, *nreg1, *nreg2;
551 	const struct sbi_platform *plat = sbi_platform_thishart_ptr();
554 	if (!reg || domain_finalized ||
555 	    (root.regions != root_memregs) ||
556 	    (ROOT_REGION_MAX <= root_memregs_count))
559 	/* Check for conflicts */
560 	sbi_domain_for_each_memregion(&root, nreg) {
561 		if (is_region_conflict(reg, nreg)) {
562 			sbi_printf("%s: is_region_conflict check failed"
563 			" 0x%lx conflicts existing 0x%lx\n", __func__,
564 				   reg->base, nreg->base);
569 	/* Append the memregion to root memregions */
570 	nreg = &root_memregs[root_memregs_count];
571 	sbi_memcpy(nreg, reg, sizeof(*reg));
572 	root_memregs_count++;
/* Keep the order==0 sentinel terminating the region array */
573 	root_memregs[root_memregs_count].order = 0;
575 	/* Sort and optimize root regions */
577 		/* Sanitize the root domain so that memregions are sorted */
578 		rc = sanitize_domain(plat, &root);
580 			sbi_printf("%s: sanity checks failed for"
581 				   " %s (error %d)\n", __func__,
586 		/* Merge consecutive memregions with same order and flags */
588 		sbi_domain_for_each_memregion(&root, nreg) {
/* Two regions merge when the first is aligned to the doubled order,
 * they are contiguous, and order and flags match exactly. */
593 			if (!(nreg->base & (BIT(nreg->order + 1) - 1)) &&
594 			    (nreg->base + BIT(nreg->order)) == nreg1->base &&
595 			    nreg->order == nreg1->order &&
596 			    nreg->flags == nreg1->flags) {
/* Shift the remaining regions down over the absorbed one */
598 				while (nreg1->order) {
600 					sbi_memcpy(nreg1, nreg2, sizeof(*nreg1));
604 				root_memregs_count--;
/* Repeat until a full pass makes no merges */
607 	} while (reg_merged);
/**
 * Add an arbitrary [addr, addr+size) range to the root domain by
 * splitting it into naturally-aligned power-of-two chunks (each at most
 * 'align'-sized at a time) and adding each chunk as a separate region.
 * The loop scaffolding and return are on elided lines.
 */
612 int sbi_domain_root_add_memrange(unsigned long addr, unsigned long size,
613 			   unsigned long align, unsigned long region_flags)
616 	unsigned long pos, end, rsize;
617 	struct sbi_domain_memregion reg;
/* Chunk size: limited by pos's alignment (lowest set bit), the
 * requested align, and the remaining length to 'end'. */
622 		rsize = pos & (align - 1);
624 			rsize = 1UL << sbi_ffs(pos);
626 			rsize = ((end - pos) < align) ?
629 		sbi_domain_memregion_init(pos, rsize, region_flags, &reg);
630 		rc = sbi_domain_root_add_memregion(&reg);
/**
 * Finalize all domains: let the platform populate its domains, then
 * start the boot HART of every domain. For the cold-boot HART the next
 * boot stage info is written into its scratch area instead of calling
 * sbi_hsm_hart_start(). Finally, lock down further domain/root-region
 * changes by setting domain_finalized. Error returns and loop 'continue's
 * are on elided lines.
 */
639 int sbi_domain_finalize(struct sbi_scratch *scratch, u32 cold_hartid)
643 	struct sbi_domain *dom;
644 	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
646 	/* Initialize and populate domains for the platform */
647 	rc = sbi_platform_domains_init(plat);
649 		sbi_printf("%s: platform domains_init() failed (error %d)\n",
654 	/* Startup boot HART of domains */
655 	sbi_domain_for_each(i, dom) {
656 		/* Domain boot HART */
657 		dhart = dom->boot_hartid;
659 		/* Ignore if boot HART is off limits */
660 		if (SBI_HARTMASK_MAX_BITS <= dhart)
663 		/* Ignore if boot HART not possible for this domain */
664 		if (!sbi_hartmask_test_hart(dhart, dom->possible_harts))
667 		/* Ignore if boot HART assigned different domain */
668 		if (sbi_hartid_to_domain(dhart) != dom ||
669 		    !sbi_hartmask_test_hart(dhart, &dom->assigned_harts))
672 		/* Startup boot HART of domain */
673 		if (dhart == cold_hartid) {
/* Cold-boot HART boots this domain directly via its scratch area */
674 			scratch->next_addr = dom->next_addr;
675 			scratch->next_mode = dom->next_mode;
676 			scratch->next_arg1 = dom->next_arg1;
/* Other HARTs are brought up through the HSM start call */
678 			rc = sbi_hsm_hart_start(scratch, NULL, dhart,
683 				sbi_printf("%s: failed to start boot HART %d"
684 					   " for %s (error %d)\n", __func__,
685 					   dhart, dom->name, rc);
692 	 * Set the finalized flag so that the root domain
693 	 * regions can't be changed.
695 	domain_finalized = true;
700 int sbi_domain_init(struct sbi_scratch *scratch, u32 cold_hartid)
703 const struct sbi_platform *plat = sbi_platform_ptr(scratch);
705 if (scratch->fw_rw_offset == 0 ||
706 (scratch->fw_rw_offset & (scratch->fw_rw_offset - 1)) != 0) {
707 sbi_printf("%s: fw_rw_offset is not a power of 2 (0x%lx)\n",
708 __func__, scratch->fw_rw_offset);
712 if ((scratch->fw_start & (scratch->fw_rw_offset - 1)) != 0) {
713 sbi_printf("%s: fw_start and fw_rw_offset not aligned\n",
718 /* Root domain firmware memory region */
719 sbi_domain_memregion_init(scratch->fw_start, scratch->fw_rw_offset,
720 (SBI_DOMAIN_MEMREGION_M_READABLE |
721 SBI_DOMAIN_MEMREGION_M_EXECUTABLE),
722 &root_memregs[root_memregs_count++]);
724 sbi_domain_memregion_init((scratch->fw_start + scratch->fw_rw_offset),
725 (scratch->fw_size - scratch->fw_rw_offset),
726 (SBI_DOMAIN_MEMREGION_M_READABLE |
727 SBI_DOMAIN_MEMREGION_M_WRITABLE),
728 &root_memregs[root_memregs_count++]);
730 root.fw_region_inited = true;
732 /* Root domain allow everything memory region */
733 sbi_domain_memregion_init(0, ~0UL,
734 (SBI_DOMAIN_MEMREGION_READABLE |
735 SBI_DOMAIN_MEMREGION_WRITEABLE |
736 SBI_DOMAIN_MEMREGION_EXECUTABLE),
737 &root_memregs[root_memregs_count++]);
739 /* Root domain memory region end */
740 root_memregs[root_memregs_count].order = 0;
742 /* Root domain boot HART id is same as coldboot HART id */
743 root.boot_hartid = cold_hartid;
745 /* Root domain next booting stage details */
746 root.next_arg1 = scratch->next_arg1;
747 root.next_addr = scratch->next_addr;
748 root.next_mode = scratch->next_mode;
750 /* Root domain possible and assigned HARTs */
751 for (i = 0; i < SBI_HARTMASK_MAX_BITS; i++) {
752 if (sbi_platform_hart_invalid(plat, i))
754 sbi_hartmask_set_hart(i, &root_hmask);
757 return sbi_domain_register(&root, &root_hmask);