The C language standard uses lowercase true/false for the boolean type.
Let's switch to that spelling for better compatibility with standard C.
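For reference, the new spelling mirrors the C99 <stdbool.h> convention, which
defines the lowercase macros on top of the _Bool type. A minimal illustrative
sketch (not the actual OpenSBI header, and hart_id_is_valid is a hypothetical
example function) looks like:

    /* Roughly what C99 <stdbool.h> provides */
    #define bool  _Bool
    #define true  1
    #define false 0

    /* Illustrative only: comparisons already yield 0 or 1,
     * so no TRUE/FALSE ternary is needed. */
    static bool hart_id_is_valid(unsigned int hartid, unsigned int hart_count)
    {
            return hartid < hart_count;
    }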
Signed-off-by: Bin Meng <bmeng@tinylab.org>
Reviewed-by: Anup Patel <anup@brainfault.org>
Reviewed-by: Samuel Holland <samuel@sholland.org>
Tested-by: Samuel Holland <samuel@sholland.org>
* Check whether given HART is assigned to specified domain
* @param dom pointer to domain
* @param hartid the HART ID
- * @return TRUE if HART is assigned to domain otherwise FALSE
+ * @return true if HART is assigned to domain otherwise false
*/
bool sbi_domain_is_assigned_hart(const struct sbi_domain *dom, u32 hartid);
* @param addr the address to be checked
* @param mode the privilege mode of access
* @param access_flags bitmask of domain access types (enum sbi_domain_access)
- * @return TRUE if access allowed otherwise FALSE
+ * @return true if access allowed otherwise false
*/
bool sbi_domain_check_addr(const struct sbi_domain *dom,
unsigned long addr, unsigned long mode,
* Checks if the list is empty or not.
* @param head List head
*
- * Retruns TRUE if list is empty, FALSE otherwise.
+ * Returns true if list is empty, false otherwise.
*/
static inline bool sbi_list_empty(struct sbi_dlist *head)
{
* @param plat pointer to struct sbi_platform
* @param hartid HART ID
*
- * @return TRUE if HART is invalid and FALSE otherwise
+ * @return true if HART is invalid and false otherwise
*/
static inline bool sbi_platform_hart_invalid(const struct sbi_platform *plat,
u32 hartid)
{
if (!plat)
- return TRUE;
+ return true;
if (plat->hart_count <= sbi_platform_hart_index(plat, hartid))
- return TRUE;
- return FALSE;
+ return true;
+ return false;
}
/**
* Early initialization for current HART
*
* @param plat pointer to struct sbi_platform
- * @param cold_boot whether cold boot (TRUE) or warm_boot (FALSE)
+ * @param cold_boot whether cold boot (true) or warm_boot (false)
*
* @return 0 on success and negative error code on failure
*/
* Final initialization for current HART
*
* @param plat pointer to struct sbi_platform
- * @param cold_boot whether cold boot (TRUE) or warm_boot (FALSE)
+ * @param cold_boot whether cold boot (true) or warm_boot (false)
*
* @return 0 on success and negative error code on failure
*/
* Initialize the platform interrupt controller for current HART
*
* @param plat pointer to struct sbi_platform
- * @param cold_boot whether cold boot (TRUE) or warm_boot (FALSE)
+ * @param cold_boot whether cold boot (true) or warm_boot (false)
*
* @return 0 on success and negative error code on failure
*/
* Initialize the platform IPI support for current HART
*
* @param plat pointer to struct sbi_platform
- * @param cold_boot whether cold boot (TRUE) or warm_boot (FALSE)
+ * @param cold_boot whether cold boot (true) or warm_boot (false)
*
* @return 0 on success and negative error code on failure
*/
* Initialize the platform timer for current HART
*
* @param plat pointer to struct sbi_platform
- * @param cold_boot whether cold boot (TRUE) or warm_boot (FALSE)
+ * @param cold_boot whether cold boot (true) or warm_boot (false)
*
* @return 0 on success and negative error code on failure
*/
typedef unsigned long physical_addr_t;
typedef unsigned long physical_size_t;
-#define TRUE 1
-#define FALSE 0
-#define true TRUE
-#define false FALSE
+#define true 1
+#define false 0
#define NULL ((void *)0)
{
if (((31 < c) && (c < 127)) || (c == '\f') || (c == '\r') ||
(c == '\n') || (c == '\t')) {
- return TRUE;
+ return true;
}
- return FALSE;
+ return false;
}
int sbi_getc(void)
.name = "root",
.possible_harts = &root_hmask,
.regions = root_memregs,
- .system_reset_allowed = TRUE,
+ .system_reset_allowed = true,
};
bool sbi_domain_is_assigned_hart(const struct sbi_domain *dom, u32 hartid)
if (dom)
return sbi_hartmask_test_hart(hartid, &dom->assigned_harts);
- return FALSE;
+ return false;
}
ulong sbi_domain_get_assigned_hartmask(const struct sbi_domain *dom,
unsigned long addr, unsigned long mode,
unsigned long access_flags)
{
- bool rmmio, mmio = FALSE;
+ bool rmmio, mmio = false;
struct sbi_domain_memregion *reg;
unsigned long rstart, rend, rflags, rwx = 0;
if (!dom)
- return FALSE;
+ return false;
if (access_flags & SBI_DOMAIN_READ)
rwx |= SBI_DOMAIN_MEMREGION_READABLE;
if (access_flags & SBI_DOMAIN_EXECUTE)
rwx |= SBI_DOMAIN_MEMREGION_EXECUTABLE;
if (access_flags & SBI_DOMAIN_MMIO)
- mmio = TRUE;
+ mmio = true;
sbi_domain_for_each_memregion(dom, reg) {
rflags = reg->flags;
rend = (reg->order < __riscv_xlen) ?
rstart + ((1UL << reg->order) - 1) : -1UL;
if (rstart <= addr && addr <= rend) {
- rmmio = (rflags & SBI_DOMAIN_MEMREGION_MMIO) ? TRUE : FALSE;
+ rmmio = (rflags & SBI_DOMAIN_MEMREGION_MMIO) ? true : false;
if (mmio != rmmio)
- return FALSE;
- return ((rflags & rwx) == rwx) ? TRUE : FALSE;
+ return false;
+ return ((rflags & rwx) == rwx) ? true : false;
}
}
- return (mode == PRV_M) ? TRUE : FALSE;
+ return (mode == PRV_M) ? true : false;
}
/* Check if region complies with constraints */
static bool is_region_valid(const struct sbi_domain_memregion *reg)
{
if (reg->order < 3 || __riscv_xlen < reg->order)
- return FALSE;
+ return false;
if (reg->order == __riscv_xlen && reg->base != 0)
- return FALSE;
+ return false;
if (reg->order < __riscv_xlen && (reg->base & (BIT(reg->order) - 1)))
- return FALSE;
+ return false;
- return TRUE;
+ return true;
}
/** Check if regionA is sub-region of regionB */
(regA_start < regB_end) &&
(regB_start < regA_end) &&
(regA_end <= regB_end))
- return TRUE;
+ return true;
- return FALSE;
+ return false;
}
/** Check if regionA conflicts regionB */
{
if ((is_region_subset(regA, regB) || is_region_subset(regB, regA)) &&
regA->flags == regB->flags)
- return TRUE;
+ return true;
- return FALSE;
+ return false;
}
/** Check if regionA should be placed before regionB */
const struct sbi_domain_memregion *regB)
{
if (regA->order < regB->order)
- return TRUE;
+ return true;
if ((regA->order == regB->order) &&
(regA->base < regB->base))
- return TRUE;
+ return true;
- return FALSE;
+ return false;
}
static int sanitize_domain(const struct sbi_platform *plat,
/* Count memory regions and check presence of firmware region */
count = 0;
- have_fw_reg = FALSE;
+ have_fw_reg = false;
sbi_domain_for_each_memregion(dom, reg) {
if (reg->order == root_fw_region.order &&
reg->base == root_fw_region.base &&
reg->flags == root_fw_region.flags)
- have_fw_reg = TRUE;
+ have_fw_reg = true;
count++;
}
if (!have_fw_reg) {
void sbi_ecall_unregister_extension(struct sbi_ecall_extension *ext)
{
- bool found = FALSE;
+ bool found = false;
struct sbi_ecall_extension *t;
if (!ext)
sbi_list_for_each_entry(t, &ecall_exts_list, head) {
if (t == ext) {
- found = TRUE;
+ found = true;
break;
}
}
regs->a0, regs->a1, smode, regs->a2);
break;
case SBI_EXT_HSM_HART_STOP:
- ret = sbi_hsm_hart_stop(scratch, TRUE);
+ ret = sbi_hsm_hart_stop(scratch, true);
break;
case SBI_EXT_HSM_HART_GET_STATUS:
ret = sbi_hsm_hart_get_state(sbi_domain_thishart_ptr(),
cen = 0;
}
- return ((cen >> hpm_num) & 1) ? TRUE : FALSE;
+ return ((cen >> hpm_num) & 1) ? true : false;
}
int sbi_emulate_csr_read(int csr_num, struct sbi_trap_regs *regs,
struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
ulong prev_mode = (regs->mstatus & MSTATUS_MPP) >> MSTATUS_MPP_SHIFT;
#if __riscv_xlen == 32
- bool virt = (regs->mstatusH & MSTATUSH_MPV) ? TRUE : FALSE;
+ bool virt = (regs->mstatusH & MSTATUSH_MPV) ? true : false;
#else
- bool virt = (regs->mstatus & MSTATUS_MPV) ? TRUE : FALSE;
+ bool virt = (regs->mstatus & MSTATUS_MPV) ? true : false;
#endif
switch (csr_num) {
int ret = 0;
ulong prev_mode = (regs->mstatus & MSTATUS_MPP) >> MSTATUS_MPP_SHIFT;
#if __riscv_xlen == 32
- bool virt = (regs->mstatusH & MSTATUSH_MPV) ? TRUE : FALSE;
+ bool virt = (regs->mstatusH & MSTATUSH_MPV) ? true : false;
#else
- bool virt = (regs->mstatus & MSTATUS_MPV) ? TRUE : FALSE;
+ bool virt = (regs->mstatus & MSTATUS_MPV) ? true : false;
#endif
switch (csr_num) {
/* Note: must be called with fifo->qlock held */
static inline bool __sbi_fifo_is_full(struct sbi_fifo *fifo)
{
- return (fifo->avail == fifo->num_entries) ? TRUE : FALSE;
+ return (fifo->avail == fifo->num_entries) ? true : false;
}
u16 sbi_fifo_avail(struct sbi_fifo *fifo)
/* Note: must be called with fifo->qlock held */
static inline bool __sbi_fifo_is_empty(struct sbi_fifo *fifo)
{
- return (fifo->avail == 0) ? TRUE : FALSE;
+ return (fifo->avail == 0) ? true : false;
}
int sbi_fifo_is_empty(struct sbi_fifo *fifo)
bool sbi_fifo_reset(struct sbi_fifo *fifo)
{
if (!fifo)
- return FALSE;
+ return false;
spin_lock(&fifo->qlock);
__sbi_fifo_reset(fifo);
spin_unlock(&fifo->qlock);
- return TRUE;
+ return true;
}
/**
if (!init_count_offset)
sbi_hart_hang();
- rc = sbi_hsm_init(scratch, hartid, TRUE);
+ rc = sbi_hsm_init(scratch, hartid, true);
if (rc)
sbi_hart_hang();
- rc = sbi_platform_early_init(plat, TRUE);
+ rc = sbi_platform_early_init(plat, true);
if (rc)
sbi_hart_hang();
- rc = sbi_hart_init(scratch, TRUE);
+ rc = sbi_hart_init(scratch, true);
if (rc)
sbi_hart_hang();
if (rc)
sbi_hart_hang();
- rc = sbi_pmu_init(scratch, TRUE);
+ rc = sbi_pmu_init(scratch, true);
if (rc)
sbi_hart_hang();
sbi_boot_print_banner(scratch);
- rc = sbi_irqchip_init(scratch, TRUE);
+ rc = sbi_irqchip_init(scratch, true);
if (rc) {
sbi_printf("%s: irqchip init failed (error %d)\n",
__func__, rc);
sbi_hart_hang();
}
- rc = sbi_ipi_init(scratch, TRUE);
+ rc = sbi_ipi_init(scratch, true);
if (rc) {
sbi_printf("%s: ipi init failed (error %d)\n", __func__, rc);
sbi_hart_hang();
}
- rc = sbi_tlb_init(scratch, TRUE);
+ rc = sbi_tlb_init(scratch, true);
if (rc) {
sbi_printf("%s: tlb init failed (error %d)\n", __func__, rc);
sbi_hart_hang();
}
- rc = sbi_timer_init(scratch, TRUE);
+ rc = sbi_timer_init(scratch, true);
if (rc) {
sbi_printf("%s: timer init failed (error %d)\n", __func__, rc);
sbi_hart_hang();
* Note: Platform final initialization should be last so that
* it sees correct domain assignment and PMP configuration.
*/
- rc = sbi_platform_final_init(plat, TRUE);
+ rc = sbi_platform_final_init(plat, true);
if (rc) {
sbi_printf("%s: platform final init failed (error %d)\n",
__func__, rc);
sbi_hsm_prepare_next_jump(scratch, hartid);
sbi_hart_switch_mode(hartid, scratch->next_arg1, scratch->next_addr,
- scratch->next_mode, FALSE);
+ scratch->next_mode, false);
}
static void init_warm_startup(struct sbi_scratch *scratch, u32 hartid)
if (!init_count_offset)
sbi_hart_hang();
- rc = sbi_hsm_init(scratch, hartid, FALSE);
+ rc = sbi_hsm_init(scratch, hartid, false);
if (rc)
sbi_hart_hang();
- rc = sbi_platform_early_init(plat, FALSE);
+ rc = sbi_platform_early_init(plat, false);
if (rc)
sbi_hart_hang();
- rc = sbi_hart_init(scratch, FALSE);
+ rc = sbi_hart_init(scratch, false);
if (rc)
sbi_hart_hang();
- rc = sbi_pmu_init(scratch, FALSE);
+ rc = sbi_pmu_init(scratch, false);
if (rc)
sbi_hart_hang();
- rc = sbi_irqchip_init(scratch, FALSE);
+ rc = sbi_irqchip_init(scratch, false);
if (rc)
sbi_hart_hang();
- rc = sbi_ipi_init(scratch, FALSE);
+ rc = sbi_ipi_init(scratch, false);
if (rc)
sbi_hart_hang();
- rc = sbi_tlb_init(scratch, FALSE);
+ rc = sbi_tlb_init(scratch, false);
if (rc)
sbi_hart_hang();
- rc = sbi_timer_init(scratch, FALSE);
+ rc = sbi_timer_init(scratch, false);
if (rc)
sbi_hart_hang();
if (rc)
sbi_hart_hang();
- rc = sbi_platform_final_init(plat, FALSE);
+ rc = sbi_platform_final_init(plat, false);
if (rc)
sbi_hart_hang();
sbi_hart_switch_mode(hartid, scratch->next_arg1,
scratch->next_addr,
- scratch->next_mode, FALSE);
+ scratch->next_mode, false);
}
static atomic_t coldboot_lottery = ATOMIC_INITIALIZER(0);
*/
void __noreturn sbi_init(struct sbi_scratch *scratch)
{
- bool next_mode_supported = FALSE;
- bool coldboot = FALSE;
+ bool next_mode_supported = false;
+ bool coldboot = false;
u32 hartid = current_hartid();
const struct sbi_platform *plat = sbi_platform_ptr(scratch);
switch (scratch->next_mode) {
case PRV_M:
- next_mode_supported = TRUE;
+ next_mode_supported = true;
break;
case PRV_S:
if (misa_extension('S'))
- next_mode_supported = TRUE;
+ next_mode_supported = true;
break;
case PRV_U:
if (misa_extension('U'))
- next_mode_supported = TRUE;
+ next_mode_supported = true;
break;
default:
sbi_hart_hang();
*/
if (next_mode_supported && atomic_xchg(&coldboot_lottery, 1) == 0)
- coldboot = TRUE;
+ coldboot = true;
/*
* Do platform specific nascent (very early) initialization so
static void sbi_ipi_process_halt(struct sbi_scratch *scratch)
{
- sbi_hsm_hart_stop(scratch, TRUE);
+ sbi_hsm_hart_stop(scratch, true);
}
static struct sbi_ipi_event_ops ipi_halt_ops = {
* @param evtA Pointer to the existing hw event structure
* @param evtB Pointer to the new hw event structure
*
- * Return FALSE if the range doesn't overlap, TRUE otherwise
+ * Return false if the range doesn't overlap, true otherwise
*/
static bool pmu_event_range_overlap(struct sbi_pmu_hw_event *evtA,
struct sbi_pmu_hw_event *evtB)
/* check if the range of events overlap with a previous entry */
if (((evtA->end_idx < evtB->start_idx) && (evtA->end_idx < evtB->end_idx)) ||
((evtA->start_idx > evtB->start_idx) && (evtA->start_idx > evtB->end_idx)))
- return FALSE;
- return TRUE;
+ return false;
+ return true;
}
static bool pmu_event_select_overlap(struct sbi_pmu_hw_event *evt,
uint64_t select_val, uint64_t select_mask)
{
if ((evt->select == select_val) && (evt->select_mask == select_mask))
- return TRUE;
+ return true;
- return FALSE;
+ return false;
}
static int pmu_event_validate(unsigned long event_idx)
int event_idx_type;
uint32_t event_code;
int ret = SBI_EINVAL;
- bool bUpdate = FALSE;
+ bool bUpdate = false;
int i, cidx;
if ((cbase + sbi_fls(cmask)) >= total_ctrs)
return ret;
if (flags & SBI_PMU_START_FLAG_SET_INIT_VALUE)
- bUpdate = TRUE;
+ bUpdate = true;
for_each_set_bit(i, &cmask, total_ctrs) {
cidx = i + cbase;
}
/* Stop current HART */
- sbi_hsm_hart_stop(scratch, FALSE);
+ sbi_hsm_hart_stop(scratch, false);
/* Platform specific reset if domain allowed system reset */
if (dom->system_reset_allowed) {
{
ulong hstatus, vsstatus, prev_mode;
#if __riscv_xlen == 32
- bool prev_virt = (regs->mstatusH & MSTATUSH_MPV) ? TRUE : FALSE;
+ bool prev_virt = (regs->mstatusH & MSTATUSH_MPV) ? true : false;
#else
- bool prev_virt = (regs->mstatus & MSTATUS_MPV) ? TRUE : FALSE;
+ bool prev_virt = (regs->mstatus & MSTATUS_MPV) ? true : false;
#endif
/* By default, we redirect to HS-mode */
- bool next_virt = FALSE;
+ bool next_virt = false;
/* Sanity check on previous mode */
prev_mode = (regs->mstatus & MSTATUS_MPP) >> MSTATUS_MPP_SHIFT;
if (misa_extension('H') && prev_virt) {
if ((trap->cause < __riscv_xlen) &&
(csr_read(CSR_HEDELEG) & BIT(trap->cause))) {
- next_virt = TRUE;
+ next_virt = true;
}
}
/* Read "system-reset-allowed" DT property */
if (fdt_get_property(fdt, domain_offset,
"system-reset-allowed", NULL))
- dom->system_reset_allowed = TRUE;
+ dom->system_reset_allowed = true;
else
- dom->system_reset_allowed = FALSE;
+ dom->system_reset_allowed = false;
/* Find /cpus DT node */
cpus_offset = fdt_path_offset(fdt, "/cpus");
static int gpio_system_poweroff_check(u32 type, u32 reason)
{
- if (gpio_reset_get(FALSE, type))
+ if (gpio_reset_get(false, type))
return 128;
return 0;
static void gpio_system_poweroff(u32 type, u32 reason)
{
- gpio_reset_exec(gpio_reset_get(FALSE, type));
+ gpio_reset_exec(gpio_reset_get(false, type));
}
static struct sbi_system_reset_device gpio_poweroff = {
static int gpio_system_restart_check(u32 type, u32 reason)
{
- if (gpio_reset_get(TRUE, type))
+ if (gpio_reset_get(true, type))
return 128;
return 0;
static void gpio_system_restart(u32 type, u32 reason)
{
- gpio_reset_exec(gpio_reset_get(TRUE, type));
+ gpio_reset_exec(gpio_reset_get(true, type));
}
static struct sbi_system_reset_device gpio_restart = {
}
static const struct fdt_match gpio_poweroff_match[] = {
- { .compatible = "gpio-poweroff", .data = (const void *)FALSE },
+ { .compatible = "gpio-poweroff", .data = (const void *)false },
{ },
};
};
static const struct fdt_match gpio_reset_match[] = {
- { .compatible = "gpio-restart", .data = (const void *)TRUE },
+ { .compatible = "gpio-restart", .data = (const void *)true },
{ },
};
.mtimecmp_size = ACLINT_DEFAULT_MTIMECMP_SIZE,
.first_hartid = 0,
.hart_count = ARIANE_HART_COUNT,
- .has_64bit_mmio = TRUE,
+ .has_64bit_mmio = true,
};
/*
.mtimecmp_size = ACLINT_DEFAULT_MTIMECMP_SIZE,
.first_hartid = 0,
.hart_count = OPENPITON_DEFAULT_HART_COUNT,
- .has_64bit_mmio = TRUE,
+ .has_64bit_mmio = true,
};
/*
}
static const struct fdt_match da9063_reset_match[] = {
- { .compatible = "dlg,da9063", .data = (void *)TRUE },
+ { .compatible = "dlg,da9063", .data = (void *)true },
{ },
};
.mtimecmp_size = ACLINT_DEFAULT_MTIMECMP_SIZE,
.first_hartid = 0,
.hart_count = K210_HART_COUNT,
- .has_64bit_mmio = TRUE,
+ .has_64bit_mmio = true,
};
static u32 k210_get_clk_freq(void)
.mtimecmp_size = ACLINT_DEFAULT_MTIMECMP_SIZE,
.first_hartid = 0,
.hart_count = UX600_HART_COUNT,
- .has_64bit_mmio = TRUE,
+ .has_64bit_mmio = true,
};
static u32 measure_cpu_freq(u32 n)
.mtimecmp_size = ACLINT_DEFAULT_MTIMECMP_SIZE,
.first_hartid = 0,
.hart_count = PLATFORM_HART_COUNT,
- .has_64bit_mmio = TRUE,
+ .has_64bit_mmio = true,
};
/*