/* clang-format off */
-#define SBI_IPI_EVENT_SOFT 0x1
-#define SBI_IPI_EVENT_FENCE 0x2
-#define SBI_IPI_EVENT_HALT 0x10
+#define SBI_IPI_EVENT_MAX __riscv_xlen
/* clang-format on */
struct sbi_scratch;
+/** IPI event operations or callbacks */
+struct sbi_ipi_event_ops {
+ /** Name of the IPI event operations */
+ char name[32];
+
+ /** Update callback to save/enqueue data for remote HART
+ * Note: This is an optional callback and it is called just before
+ * triggering IPI to remote HART.
+ * Note: A negative return value aborts the send for this HART
+ * (the IPI is not triggered) and is propagated to the caller
+ * of sbi_ipi_send().
+ */
+ int (* update)(struct sbi_scratch *scratch,
+ struct sbi_scratch *remote_scratch,
+ u32 remote_hartid, void *data);
+
+ /** Sync callback to wait for remote HART
+ * Note: This is an optional callback and it is called just after
+ * triggering IPI to remote HART.
+ */
+ void (* sync)(struct sbi_scratch *scratch);
+
+ /** Process callback to handle IPI event
+ * Note: This is a mandatory callback and it is called on the
+ * remote HART after IPI is triggered.
+ */
+ void (* process)(struct sbi_scratch *scratch);
+};
+
int sbi_ipi_send_many(struct sbi_scratch *scratch, ulong hmask,
ulong hbase, u32 event, void *data);
+/** Create (register) an IPI event.
+ *
+ * Returns the allocated event number (>= 0) on success,
+ * SBI_EINVAL when @ops is NULL or lacks a process() callback,
+ * or SBI_ENOSPC when all event slots are in use.
+ */
+int sbi_ipi_event_create(const struct sbi_ipi_event_ops *ops);
+
+/** Destroy (unregister) a previously created IPI event. */
+void sbi_ipi_event_destroy(u32 event);
+
int sbi_ipi_send_smode(struct sbi_scratch *scratch, ulong hmask, ulong hbase);
void sbi_ipi_clear_smode(struct sbi_scratch *scratch);
#define SBI_TLB_INFO_SIZE sizeof(struct sbi_tlb_info)
-int sbi_tlb_fifo_update(struct sbi_scratch *scratch, u32 hartid, void *data);
-
-void sbi_tlb_fifo_process(struct sbi_scratch *scratch);
-
-void sbi_tlb_fifo_sync(struct sbi_scratch *scratch);
-
int sbi_tlb_fifo_request(struct sbi_scratch *scratch, ulong hmask,
ulong hbase, struct sbi_tlb_info *tinfo);
static unsigned long ipi_data_off;
-static int sbi_ipi_send(struct sbi_scratch *scratch, u32 hartid, u32 event,
- void *data)
+/* Registered IPI event operations, indexed by event number */
+static const struct sbi_ipi_event_ops *ipi_ops_array[SBI_IPI_EVENT_MAX];
+
+/*
+ * Send an IPI of a registered event to one remote HART.
+ *
+ * Returns 0 on success; SBI_EINVAL when the event number is out of
+ * range, unregistered, or the target HART is disabled; a negative
+ * value from ops->update() is propagated and the IPI is not
+ * triggered.
+ */
+static int sbi_ipi_send(struct sbi_scratch *scratch, u32 remote_hartid,
+ u32 event, void *data)
{
int ret;
struct sbi_scratch *remote_scratch = NULL;
const struct sbi_platform *plat = sbi_platform_ptr(scratch);
struct sbi_ipi_data *ipi_data;
+ const struct sbi_ipi_event_ops *ipi_ops;
- if (sbi_platform_hart_disabled(plat, hartid))
- return -1;
+ if ((SBI_IPI_EVENT_MAX <= event) ||
+ !ipi_ops_array[event] ||
+ sbi_platform_hart_disabled(plat, remote_hartid))
+ return SBI_EINVAL;
+ ipi_ops = ipi_ops_array[event];
/*
* Set IPI type on remote hart's scratch area and
* trigger the interrupt
*/
- remote_scratch = sbi_hart_id_to_scratch(scratch, hartid);
+ remote_scratch = sbi_hart_id_to_scratch(scratch, remote_hartid);
ipi_data = sbi_scratch_offset_ptr(remote_scratch, ipi_data_off);
- if (event == SBI_IPI_EVENT_FENCE) {
- ret = sbi_tlb_fifo_update(remote_scratch, hartid, data);
+
+ /* Optional update: let the event enqueue data on the remote HART */
+ if (ipi_ops->update) {
+ ret = ipi_ops->update(scratch, remote_scratch,
+ remote_hartid, data);
if (ret < 0)
return ret;
}
+
atomic_raw_set_bit(event, &ipi_data->ipi_type);
smp_wmb();
- sbi_platform_ipi_send(plat, hartid);
+ sbi_platform_ipi_send(plat, remote_hartid);
- if (event == SBI_IPI_EVENT_FENCE)
- sbi_tlb_fifo_sync(scratch);
+ /* Optional sync: wait for the remote HART to act on the IPI */
+ if (ipi_ops->sync)
+ ipi_ops->sync(scratch);
return 0;
}
return 0;
}
+/*
+ * Register an IPI event in the first free slot of ipi_ops_array.
+ *
+ * Returns the allocated event number (>= 0), SBI_EINVAL when @ops
+ * is NULL or has no mandatory process() callback, or SBI_ENOSPC
+ * when all SBI_IPI_EVENT_MAX slots are already taken.
+ */
+int sbi_ipi_event_create(const struct sbi_ipi_event_ops *ops)
+{
+ int i, ret = SBI_ENOSPC;
+
+ if (!ops || !ops->process)
+ return SBI_EINVAL;
+
+ for (i = 0; i < SBI_IPI_EVENT_MAX; i++) {
+ if (!ipi_ops_array[i]) {
+ ret = i;
+ ipi_ops_array[i] = ops;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/* Unregister an IPI event; out-of-range event numbers are ignored. */
+void sbi_ipi_event_destroy(u32 event)
+{
+ if (SBI_IPI_EVENT_MAX <= event)
+ return;
+
+ ipi_ops_array[event] = NULL;
+}
+
+/* Process callback for the S-mode software IPI: raise the
+ * supervisor software interrupt on the local HART (MIP.SSIP).
+ */
+static void sbi_ipi_process_smode(struct sbi_scratch *scratch)
+{
+ csr_set(CSR_MIP, MIP_SSIP);
+}
+
+static struct sbi_ipi_event_ops ipi_smode_ops = {
+ .name = "IPI_SMODE",
+ .process = sbi_ipi_process_smode,
+};
+
+/* Event number allocated at cold boot; SBI_IPI_EVENT_MAX = not yet created */
+static u32 ipi_smode_event = SBI_IPI_EVENT_MAX;
+
+/* Send the S-mode software-interrupt IPI to the HARTs in hmask/hbase */
int sbi_ipi_send_smode(struct sbi_scratch *scratch, ulong hmask, ulong hbase)
{
- return sbi_ipi_send_many(scratch, hmask, hbase,
- SBI_IPI_EVENT_SOFT, NULL);
+ return sbi_ipi_send_many(scratch, hmask, hbase, ipi_smode_event, NULL);
}
void sbi_ipi_clear_smode(struct sbi_scratch *scratch)
csr_clear(CSR_MIP, MIP_SSIP);
}
+/* Process callback for the HALT IPI: exit SBI on the local HART */
+static void sbi_ipi_process_halt(struct sbi_scratch *scratch)
+{
+ sbi_exit(scratch);
+}
+
+static struct sbi_ipi_event_ops ipi_halt_ops = {
+ .name = "IPI_HALT",
+ .process = sbi_ipi_process_halt,
+};
+
+/* Event number allocated at cold boot; SBI_IPI_EVENT_MAX = not yet created */
+static u32 ipi_halt_event = SBI_IPI_EVENT_MAX;
+
+/* Send the HALT IPI to the HARTs in hmask/hbase */
int sbi_ipi_send_halt(struct sbi_scratch *scratch, ulong hmask, ulong hbase)
{
- return sbi_ipi_send_many(scratch, hmask, hbase,
- SBI_IPI_EVENT_HALT, NULL);
+ return sbi_ipi_send_many(scratch, hmask, hbase, ipi_halt_event, NULL);
}
void sbi_ipi_process(struct sbi_scratch *scratch)
{
unsigned long ipi_type;
unsigned int ipi_event;
+ const struct sbi_ipi_event_ops *ipi_ops;
const struct sbi_platform *plat = sbi_platform_ptr(scratch);
struct sbi_ipi_data *ipi_data =
sbi_scratch_offset_ptr(scratch, ipi_data_off);
if (!(ipi_type & 1UL))
goto skip;
- switch (ipi_event) {
- case SBI_IPI_EVENT_SOFT:
- csr_set(CSR_MIP, MIP_SSIP);
- break;
- case SBI_IPI_EVENT_FENCE:
- sbi_tlb_fifo_process(scratch);
- break;
- case SBI_IPI_EVENT_HALT:
- sbi_exit(scratch);
- break;
- default:
- break;
- };
+ /* Dispatch to the process() callback registered for this event,
+ * if any; unregistered event bits are silently ignored.
+ */
+ ipi_ops = ipi_ops_array[ipi_event];
+ if (ipi_ops && ipi_ops->process)
+ ipi_ops->process(scratch);
skip:
ipi_type = ipi_type >> 1;
"IPI_DATA");
if (!ipi_data_off)
return SBI_ENOMEM;
+ /* Cold boot: register the built-in S-mode and HALT IPI events */
+ ret = sbi_ipi_event_create(&ipi_smode_ops);
+ if (ret < 0)
+ return ret;
+ ipi_smode_event = ret;
+ ret = sbi_ipi_event_create(&ipi_halt_ops);
+ if (ret < 0)
+ return ret;
+ ipi_halt_event = ret;
} else {
if (!ipi_data_off)
return SBI_ENOMEM;
+ /* Warm boot: the cold-boot HART must have created both events */
+ if (SBI_IPI_EVENT_MAX <= ipi_smode_event ||
+ SBI_IPI_EVENT_MAX <= ipi_halt_event)
+ return SBI_ENOSPC;
}
ipi_data = sbi_scratch_offset_ptr(scratch, ipi_data_off);
}
}
-void sbi_tlb_fifo_process(struct sbi_scratch *scratch)
+/* IPI process callback for TLB flush requests; now static because it
+ * is invoked only through tlb_ops, not called directly any more.
+ */
+static void sbi_tlb_fifo_process(struct sbi_scratch *scratch)
{
struct sbi_tlb_info tinfo;
struct sbi_fifo *tlb_fifo =
sbi_tlb_entry_process(scratch, &tinfo);
}
-void sbi_tlb_fifo_sync(struct sbi_scratch *scratch)
+/* IPI sync callback for TLB flush requests; now static, invoked only
+ * through tlb_ops after the IPI is triggered.
+ */
+static void sbi_tlb_fifo_sync(struct sbi_scratch *scratch)
{
unsigned long *tlb_sync =
sbi_scratch_offset_ptr(scratch, tlb_sync_off);
return ret;
}
-int sbi_tlb_fifo_update(struct sbi_scratch *rscratch, u32 hartid, void *data)
+/*
+ * IPI update callback for TLB flush requests: enqueue @data (a
+ * struct sbi_tlb_info) into the remote HART's TLB FIFO.
+ *
+ * A request targeting the local HART is flushed immediately and -1
+ * is returned so that sbi_ipi_send() skips triggering the IPI.
+ */
+static int sbi_tlb_fifo_update(struct sbi_scratch *scratch,
+ struct sbi_scratch *remote_scratch,
+ u32 remote_hartid, void *data)
{
int ret;
struct sbi_fifo *tlb_fifo_r;
- struct sbi_scratch *lscratch;
struct sbi_tlb_info *tinfo = data;
u32 curr_hartid = sbi_current_hartid();
* If the request is to queue a tlb flush entry for itself
* then just do a local flush and return;
*/
- if (hartid == curr_hartid) {
+ if (remote_hartid == curr_hartid) {
sbi_tlb_local_flush(tinfo);
return -1;
}
- lscratch = sbi_hart_id_to_scratch(rscratch, curr_hartid);
- tlb_fifo_r = sbi_scratch_offset_ptr(rscratch, tlb_fifo_off);
+ tlb_fifo_r = sbi_scratch_offset_ptr(remote_scratch, tlb_fifo_off);
ret = sbi_fifo_inplace_update(tlb_fifo_r, data, sbi_tlb_fifo_update_cb);
if (ret != SBI_FIFO_UNCHANGED) {
* TODO: Introduce a wait/wakeup event mechanism to handle
* this properly.
*/
- sbi_tlb_fifo_process_count(lscratch, 1);
- sbi_dprintf(remote_scratch, "hart%d: hart%d tlb fifo full\n",
- curr_hartid, hartid);
+ /* The local scratch is now passed in directly as @scratch,
+ * so the old lscratch lookup is no longer needed.
+ */
+ sbi_tlb_fifo_process_count(scratch, 1);
+ sbi_dprintf(remote_scratch, "hart%d: hart%d tlb fifo full\n",
+ curr_hartid, remote_hartid);
}
return 0;
}
+/* IPI event operations for remote TLB flush requests */
+static struct sbi_ipi_event_ops tlb_ops = {
+ .name = "IPI_TLB",
+ .update = sbi_tlb_fifo_update,
+ .sync = sbi_tlb_fifo_sync,
+ .process = sbi_tlb_fifo_process,
+};
+
+/* Event number for TLB flush IPIs; allocated in sbi_tlb_fifo_init() */
+static u32 tlb_event = SBI_IPI_EVENT_MAX;
+
+/* Send a TLB flush request IPI to the HARTs in hmask/hbase */
int sbi_tlb_fifo_request(struct sbi_scratch *scratch, ulong hmask,
ulong hbase, struct sbi_tlb_info *tinfo)
{
- return sbi_ipi_send_many(scratch, hmask, hbase,
- SBI_IPI_EVENT_FENCE, tinfo);
+ return sbi_ipi_send_many(scratch, hmask, hbase, tlb_event, tinfo);
}
int sbi_tlb_fifo_init(struct sbi_scratch *scratch, bool cold_boot)
{
+ int ret;
void *tlb_mem;
unsigned long *tlb_sync;
struct sbi_fifo *tlb_q;
sbi_scratch_free_offset(tlb_sync_off);
return SBI_ENOMEM;
}
+ /* Register the TLB flush IPI event; on failure roll back the
+ * scratch-space offsets allocated above.
+ */
+ ret = sbi_ipi_event_create(&tlb_ops);
+ if (ret < 0) {
+ sbi_scratch_free_offset(tlb_fifo_mem_off);
+ sbi_scratch_free_offset(tlb_fifo_off);
+ sbi_scratch_free_offset(tlb_sync_off);
+ return ret;
+ }
+ tlb_event = ret;
tlb_range_flush_limit = sbi_platform_tlbr_flush_limit(plat);
} else {
if (!tlb_sync_off ||
!tlb_fifo_off ||
!tlb_fifo_mem_off)
return SBI_ENOMEM;
+ /* Warm boot: cold boot must already have created the event */
+ if (SBI_IPI_EVENT_MAX <= tlb_event)
+ return SBI_ENOSPC;
}
tlb_sync = sbi_scratch_offset_ptr(scratch, tlb_sync_off);