#define SBI_SCRATCH_TMP0_OFFSET (8 * __SIZEOF_POINTER__)
/** Offset of options member in sbi_scratch */
#define SBI_SCRATCH_OPTIONS_OFFSET (9 * __SIZEOF_POINTER__)
-
-/** sbi_ipi_data is located behind sbi_scratch. This struct is not packed. */
-/** Offset of ipi_type in sbi_ipi_data */
-#define SBI_IPI_DATA_IPI_TYPE_OFFSET (15 * __SIZEOF_POINTER__)
-
-#define SBI_SCRATCH_TLB_QUEUE_HEAD_OFFSET (16 * __SIZEOF_POINTER__)
-#define SBI_SCRATCH_TLB_QUEUE_MEM_OFFSET (SBI_SCRATCH_TLB_QUEUE_HEAD_OFFSET + SBI_TLB_INFO_SIZE)
-
+/** Offset of extra space in sbi_scratch */
+#define SBI_SCRATCH_EXTRA_SPACE_OFFSET (10 * __SIZEOF_POINTER__)
/** Maximum size of sbi_scratch and sbi_ipi_data */
#define SBI_SCRATCH_SIZE (64 * __SIZEOF_POINTER__)
#define sbi_scratch_thishart_arg1_ptr() \
((void *)(sbi_scratch_thishart_ptr()->next_arg1))
-/** Get pointer to sbi_ipi_data from sbi_scratch */
-#define sbi_ipi_data_ptr(scratch) \
- ((struct sbi_ipi_data *)(void *)scratch + SBI_IPI_DATA_IPI_TYPE_OFFSET)
+/** Allocate from extra space in sbi_scratch
+ *
+ * @return zero on failure and non-zero (>= SBI_SCRATCH_EXTRA_SPACE_OFFSET)
+ * on success
+ */
+unsigned long sbi_scratch_alloc_offset(unsigned long size, const char *owner);
+
+/** Free-up extra space in sbi_scratch */
+void sbi_scratch_free_offset(unsigned long offset);
-/** Get pointer to tlb flush info fifo header from sbi_scratch */
-#define sbi_tlb_fifo_head_ptr(scratch) \
- ((struct sbi_fifo *)(void *)scratch + SBI_SCRATCH_TLB_QUEUE_HEAD_OFFSET)
+/** Get pointer from offset in sbi_scratch */
+#define sbi_scratch_offset_ptr(scratch, offset) ((void *)scratch + (offset))
-/** Get pointer to tlb flush info fifo queue address from sbi_scratch */
-#define sbi_tlb_fifo_mem_ptr(scratch) \
- (void *)((void *)scratch + SBI_SCRATCH_TLB_QUEUE_MEM_OFFSET)
+/** Get pointer from offset in sbi_scratch for current HART */
+#define sbi_scratch_thishart_offset_ptr(offset) \
+ ((void *)sbi_scratch_thishart_ptr() + (offset))
#endif
#include <sbi/riscv_barrier.h>
#include <sbi/riscv_atomic.h>
#include <sbi/riscv_unpriv.h>
+#include <sbi/sbi_error.h>
#include <sbi/sbi_fifo.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_timer.h>
#include <plat/string.h>
+static unsigned long ipi_data_off;
+static unsigned long ipi_tlb_fifo_off;
+static unsigned long ipi_tlb_fifo_mem_off;
+
static inline int __sbi_tlb_fifo_range_check(struct sbi_tlb_info *curr,
struct sbi_tlb_info *next)
{
void *data)
{
struct sbi_scratch *remote_scratch = NULL;
- const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+ struct sbi_ipi_data *ipi_data;
struct sbi_fifo *ipi_tlb_fifo;
struct sbi_tlb_info *tinfo = data;
int ret = SBI_FIFO_UNCHANGED;
* trigger the interrupt
*/
remote_scratch = sbi_hart_id_to_scratch(scratch, hartid);
+ ipi_data = sbi_scratch_offset_ptr(remote_scratch, ipi_data_off);
+ ipi_tlb_fifo = sbi_scratch_offset_ptr(remote_scratch,
+ ipi_tlb_fifo_off);
if (event == SBI_IPI_EVENT_SFENCE_VMA ||
event == SBI_IPI_EVENT_SFENCE_VMA_ASID) {
/*
tinfo->size = SBI_TLB_FLUSH_ALL;
}
- ipi_tlb_fifo = sbi_tlb_fifo_head_ptr(remote_scratch);
- ret = sbi_fifo_inplace_update(ipi_tlb_fifo, data,
- sbi_tlb_fifo_update_cb);
+ ret = sbi_fifo_inplace_update(ipi_tlb_fifo, data,
+ sbi_tlb_fifo_update_cb);
if (ret == SBI_FIFO_SKIP || ret == SBI_FIFO_UPDATED) {
goto done;
}
__asm__ __volatile("nop");
}
}
- atomic_raw_set_bit(event, &sbi_ipi_data_ptr(remote_scratch)->ipi_type);
+ atomic_raw_set_bit(event, &ipi_data->ipi_type);
mb();
sbi_platform_ipi_send(plat, hartid);
if (event != SBI_IPI_EVENT_SOFT)
struct sbi_tlb_info tinfo;
unsigned int ipi_event;
const struct sbi_platform *plat = sbi_platform_ptr(scratch);
- struct sbi_fifo *ipi_tlb_fifo = sbi_tlb_fifo_head_ptr(scratch);
+ struct sbi_ipi_data *ipi_data =
+ sbi_scratch_offset_ptr(scratch, ipi_data_off);
+ struct sbi_fifo *ipi_tlb_fifo =
+ sbi_scratch_offset_ptr(scratch, ipi_tlb_fifo_off);
u32 hartid = sbi_current_hartid();
sbi_platform_ipi_clear(plat, hartid);
do {
- ipi_type = sbi_ipi_data_ptr(scratch)->ipi_type;
+ ipi_type = ipi_data->ipi_type;
rmb();
ipi_event = __ffs(ipi_type);
switch (ipi_event) {
sbi_hart_hang();
break;
};
- ipi_type = atomic_raw_clear_bit(
- ipi_event, &sbi_ipi_data_ptr(scratch)->ipi_type);
+ ipi_type = atomic_raw_clear_bit(ipi_event, &ipi_data->ipi_type);
} while (ipi_type > 0);
}
int sbi_ipi_init(struct sbi_scratch *scratch, bool cold_boot)
{
- struct sbi_fifo *tlb_info_q = sbi_tlb_fifo_head_ptr(scratch);
+ void *ipi_tlb_mem;
+ struct sbi_fifo *ipi_tlb_q;
+ struct sbi_ipi_data *ipi_data;
+
+ if (cold_boot) {
+ ipi_data_off = sbi_scratch_alloc_offset(sizeof(*ipi_data),
+ "IPI_DATA");
+ if (!ipi_data_off)
+ return SBI_ENOMEM;
+ ipi_tlb_fifo_off = sbi_scratch_alloc_offset(sizeof(*ipi_tlb_q),
+ "IPI_TLB_FIFO");
+ if (!ipi_tlb_fifo_off) {
+ sbi_scratch_free_offset(ipi_data_off);
+ return SBI_ENOMEM;
+ }
+ ipi_tlb_fifo_mem_off = sbi_scratch_alloc_offset(
+ SBI_TLB_FIFO_NUM_ENTRIES * SBI_TLB_INFO_SIZE,
+ "IPI_TLB_FIFO_MEM");
+ if (!ipi_tlb_fifo_mem_off) {
+ sbi_scratch_free_offset(ipi_tlb_fifo_off);
+ sbi_scratch_free_offset(ipi_data_off);
+ return SBI_ENOMEM;
+ }
+ } else {
+ if (!ipi_data_off ||
+ !ipi_tlb_fifo_off ||
+ !ipi_tlb_fifo_mem_off)
+ return SBI_ENOMEM;
+ }
+
+ ipi_data = sbi_scratch_offset_ptr(scratch, ipi_data_off);
+ ipi_tlb_q = sbi_scratch_offset_ptr(scratch, ipi_tlb_fifo_off);
+ ipi_tlb_mem = sbi_scratch_offset_ptr(scratch, ipi_tlb_fifo_mem_off);
- sbi_ipi_data_ptr(scratch)->ipi_type = 0x00;
- sbi_fifo_init(tlb_info_q, sbi_tlb_fifo_mem_ptr(scratch),
+ ipi_data->ipi_type = 0x00;
+ sbi_fifo_init(ipi_tlb_q, ipi_tlb_mem,
SBI_TLB_FIFO_NUM_ENTRIES, SBI_TLB_INFO_SIZE);
/* Enable software interrupts */
--- /dev/null
+ /*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_locks.h>
+#include <sbi/sbi_scratch.h>
+
+static spinlock_t extra_lock = SPIN_LOCK_INITIALIZER;
+static unsigned long extra_offset = SBI_SCRATCH_EXTRA_SPACE_OFFSET;
+
+/**
+ * Allocate @size bytes from the extra space at the end of sbi_scratch.
+ *
+ * @param size  number of bytes requested; rounded up to the next multiple
+ *              of the pointer size. A request of zero bytes fails.
+ * @param owner name of the requesting subsystem; unused by this allocator
+ *              (presumably kept for future diagnostics — not referenced here)
+ * @return offset into sbi_scratch (>= SBI_SCRATCH_EXTRA_SPACE_OFFSET) on
+ *         success, or zero on failure (size == 0 or no space left)
+ */
+unsigned long sbi_scratch_alloc_offset(unsigned long size, const char *owner)
+{
+	unsigned long ret = 0;
+
+	/*
+	 * We have a simple brain-dead allocator which never expects
+	 * anything to be free-ed hence it keeps incrementing the
+	 * next allocation offset until it runs-out of space.
+	 *
+	 * In future, we will have more sophisticated allocator which
+	 * will allow us to re-claim free-ed space.
+	 */
+
+	if (!size)
+		return 0;
+
+	/* Round size up to pointer-size alignment. */
+	while (size & (__SIZEOF_POINTER__ - 1))
+		size++;
+
+	/* Serialize bump-pointer updates across HARTs. */
+	spin_lock(&extra_lock);
+
+	/* Fail (ret stays 0) if the request would exceed SBI_SCRATCH_SIZE. */
+	if (SBI_SCRATCH_SIZE < (extra_offset + size))
+		goto done;
+
+	ret = extra_offset;
+	extra_offset += size;
+
+done:
+	spin_unlock(&extra_lock);
+
+	return ret;
+}
+
+/**
+ * Free an offset previously returned by sbi_scratch_alloc_offset().
+ *
+ * @param offset offset into sbi_scratch; values outside the extra-space
+ *               range [SBI_SCRATCH_EXTRA_SPACE_OFFSET, SBI_SCRATCH_SIZE)
+ *               are silently ignored
+ *
+ * Currently a no-op: the bump allocator never reclaims space (see the
+ * comment in sbi_scratch_alloc_offset()); only the range check runs.
+ */
+void sbi_scratch_free_offset(unsigned long offset)
+{
+	if ((offset < SBI_SCRATCH_EXTRA_SPACE_OFFSET) ||
+	    (SBI_SCRATCH_SIZE <= offset))
+		return;
+
+	/*
+	 * We don't actually free-up because it's a simple
+	 * brain-dead allocator.
+	 */
+}