bool
default n
+config CXL_AFU_DRIVER_OPS
+ bool
+ default n
+
config CXL
tristate "Support for IBM Coherent Accelerators (CXL)"
depends on PPC_POWERNV && PCI_MSI && EEH
select CXL_BASE
select CXL_KERNEL_API
select CXL_EEH
+ select CXL_AFU_DRIVER_OPS
default m
help
Select this option to enable driver support for IBM Coherent
}
EXPORT_SYMBOL_GPL(cxl_fops_get_context);
+void cxl_set_driver_ops(struct cxl_context *ctx,
+ struct cxl_afu_driver_ops *ops)
+{
+ WARN_ON(!ops->fetch_event || !ops->event_delivered);
+ atomic_set(&ctx->afu_driver_events, 0);
+ ctx->afu_driver_ops = ops;
+}
+EXPORT_SYMBOL_GPL(cxl_set_driver_ops);
+
+void cxl_context_events_pending(struct cxl_context *ctx,
+ unsigned int new_events)
+{
+ atomic_add(new_events, &ctx->afu_driver_events);
+ wake_up_all(&ctx->wq);
+}
+EXPORT_SYMBOL_GPL(cxl_context_events_pending);
+
int cxl_start_work(struct cxl_context *ctx,
struct cxl_ioctl_start_work *work)
{
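For illustration, here is a hedged sketch of how an AFU driver might consume the two exports added above (cxl_set_driver_ops() and cxl_context_events_pending()). All of the my_* names are hypothetical and not part of this patch; cxl_dev_context_init() is the existing in-kernel cxl API for creating a context, and the two callbacks referenced in the ops table are sketched after the misc/cxl.h hunk below.

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <misc/cxl.h>

/* Hypothetical AFU driver state -- not part of this patch */
struct my_afu {
	struct pci_dev *pdev;
	struct cxl_context *ctx;
	struct cxl_event_afu_driver_reserved *pending_event;
};

/* Callbacks sketched after the misc/cxl.h hunk below */
static struct cxl_event_afu_driver_reserved *
my_afu_fetch_event(struct cxl_context *ctx);
static void my_afu_event_delivered(struct cxl_context *ctx,
				   struct cxl_event_afu_driver_reserved *pl,
				   int rc);

static struct cxl_afu_driver_ops my_afu_ops = {
	.fetch_event	 = my_afu_fetch_event,
	.event_delivered = my_afu_event_delivered,
};

static int my_afu_init_context(struct my_afu *afu)
{
	afu->ctx = cxl_dev_context_init(afu->pdev);
	if (IS_ERR(afu->ctx))
		return PTR_ERR(afu->ctx);

	/* Register the ops before any events can be queued for this context */
	cxl_set_driver_ops(afu->ctx, &my_afu_ops);
	return 0;
}

static irqreturn_t my_afu_irq(int irq, void *data)
{
	struct my_afu *afu = data;

	/* One new driver-specific event: bump the count and wake any readers */
	cxl_context_events_pending(afu->ctx, 1);
	return IRQ_HANDLED;
}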
#include <asm/reg.h>
#include <misc/cxl-base.h>
+#include <misc/cxl.h>
#include <uapi/misc/cxl.h>
extern uint cxl_verbose;
* Bump version each time a user API change is made, whether it is
 * backwards compatible or not.
*/
-#define CXL_API_VERSION 2
+#define CXL_API_VERSION 3
#define CXL_API_VERSION_COMPATIBLE 1
/*
bool pending_fault;
bool pending_afu_err;
+ /* Used by AFU drivers for driver specific event delivery */
+ struct cxl_afu_driver_ops *afu_driver_ops;
+ atomic_t afu_driver_events;
+
struct rcu_head rcu;
};
return cxl_context_iomap(ctx, vm);
}
+static inline bool ctx_event_pending(struct cxl_context *ctx)
+{
+ if (ctx->pending_irq || ctx->pending_fault || ctx->pending_afu_err)
+ return true;
+
+ if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events))
+ return true;
+
+ return false;
+}
+
unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
struct cxl_context *ctx = file->private_data;
pr_devel("afu_poll wait done pe: %i\n", ctx->pe);
spin_lock_irqsave(&ctx->lock, flags);
- if (ctx->pending_irq || ctx->pending_fault ||
- ctx->pending_afu_err)
+ if (ctx_event_pending(ctx))
mask |= POLLIN | POLLRDNORM;
else if (ctx->status == CLOSED)
		/* Only error on closed when there are no further events pending
return mask;
}
-static inline int ctx_event_pending(struct cxl_context *ctx)
+static ssize_t afu_driver_event_copy(struct cxl_context *ctx,
+ char __user *buf,
+ struct cxl_event *event,
+ struct cxl_event_afu_driver_reserved *pl)
{
- return (ctx->pending_irq || ctx->pending_fault ||
- ctx->pending_afu_err || (ctx->status == CLOSED));
+ /* Check event */
+ if (!pl) {
+ ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
+ return -EFAULT;
+ }
+
+ /* Check event size */
+ event->header.size += pl->data_size;
+ if (event->header.size > CXL_READ_MIN_SIZE) {
+ ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
+ return -EFAULT;
+ }
+
+ /* Copy event header */
+ if (copy_to_user(buf, event, sizeof(struct cxl_event_header))) {
+ ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
+ return -EFAULT;
+ }
+
+ /* Copy event data */
+ buf += sizeof(struct cxl_event_header);
+ if (copy_to_user(buf, &pl->data, pl->data_size)) {
+ ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
+ return -EFAULT;
+ }
+
+ ctx->afu_driver_ops->event_delivered(ctx, pl, 0); /* Success */
+ return event->header.size;
}
ssize_t afu_read(struct file *file, char __user *buf, size_t count,
loff_t *off)
{
struct cxl_context *ctx = file->private_data;
+ struct cxl_event_afu_driver_reserved *pl = NULL;
struct cxl_event event;
unsigned long flags;
int rc;
for (;;) {
prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE);
- if (ctx_event_pending(ctx))
+ if (ctx_event_pending(ctx) || (ctx->status == CLOSED))
break;
if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
memset(&event, 0, sizeof(event));
event.header.process_element = ctx->pe;
event.header.size = sizeof(struct cxl_event_header);
- if (ctx->pending_irq) {
+ if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) {
+ pr_devel("afu_read delivering AFU driver specific event\n");
+ pl = ctx->afu_driver_ops->fetch_event(ctx);
+ atomic_dec(&ctx->afu_driver_events);
+ event.header.type = CXL_EVENT_AFU_DRIVER;
+ } else if (ctx->pending_irq) {
pr_devel("afu_read delivering AFU interrupt\n");
event.header.size += sizeof(struct cxl_event_afu_interrupt);
event.header.type = CXL_EVENT_AFU_INTERRUPT;
spin_unlock_irqrestore(&ctx->lock, flags);
+ if (event.header.type == CXL_EVENT_AFU_DRIVER)
+ return afu_driver_event_copy(ctx, buf, &event, pl);
+
if (copy_to_user(buf, &event, event.header.size))
return -EFAULT;
return event.header.size;
* If these change we really need to update API. Either change some
* flags or update API version number CXL_API_VERSION.
*/
- BUILD_BUG_ON(CXL_API_VERSION != 2);
+ BUILD_BUG_ON(CXL_API_VERSION != 3);
BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64);
BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8);
BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8);
*/
ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count);
+/*
+ * AFU driver ops allow an AFU driver to create its own events to pass to
+ * userspace through the file descriptor, as a simpler alternative to
+ * overriding the read() and poll() calls. These events coexist with the
+ * generic cxl events, but are given priority over them, so they will be
+ * delivered first if multiple types of events are pending.
+ *
+ * The AFU driver must call cxl_context_events_pending() to notify the cxl
+ * driver that new events are ready to be delivered for a specific context.
+ * cxl_context_events_pending() will adjust the current count of AFU driver
+ * events for this context, and wake up anyone waiting on the context wait
+ * queue.
+ *
+ * The cxl driver will then call fetch_event() to get a structure defining
+ * the size and address of the driver specific event data. The cxl driver
+ * will build a cxl header with type and process_element fields filled in,
+ * and header.size set to sizeof(struct cxl_event_header) + data_size.
+ * The total size of the event is limited to CXL_READ_MIN_SIZE (4K).
+ *
+ * fetch_event() is called with a spin lock held, so it must not sleep.
+ *
+ * The cxl driver will then deliver the event to userspace, and finally
+ * call event_delivered() to return the status of the operation, identified
+ * by cxl context and AFU driver event data pointers.
+ * 0 Success
+ * -EFAULT copy_to_user() has failed
+ * -EINVAL Event data pointer is NULL, or event size is greater than
+ * CXL_READ_MIN_SIZE.
+ */
+struct cxl_afu_driver_ops {
+ struct cxl_event_afu_driver_reserved *(*fetch_event) (
+ struct cxl_context *ctx);
+ void (*event_delivered) (struct cxl_context *ctx,
+ struct cxl_event_afu_driver_reserved *event,
+ int rc);
+};
+
+/*
+ * Associate the above driver ops with a specific context.
+ * Reset the current count of AFU driver events.
+ */
+void cxl_set_driver_ops(struct cxl_context *ctx,
+ struct cxl_afu_driver_ops *ops);
+
+/* Notify cxl driver that new events are ready to be delivered for context */
+void cxl_context_events_pending(struct cxl_context *ctx,
+ unsigned int new_events);
+
#endif /* _MISC_CXL_H */
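Following the contract documented above, a minimal sketch of the two callbacks, continuing the hypothetical my_afu driver from the earlier sketch: it keeps at most one outstanding event in a pending_event pointer, and my_afu_from_ctx() is an invented lookup helper. fetch_event() is called with the context lock held, so it only performs an atomic pointer exchange; event_delivered() hands the buffer back to the AFU driver regardless of the delivery status.

#include <linux/slab.h>

static struct cxl_event_afu_driver_reserved *
my_afu_fetch_event(struct cxl_context *ctx)
{
	struct my_afu *afu = my_afu_from_ctx(ctx);	/* hypothetical lookup helper */

	/* Called with ctx->lock held: must not sleep, just hand over the buffer */
	return xchg(&afu->pending_event, NULL);
}

static void my_afu_event_delivered(struct cxl_context *ctx,
				   struct cxl_event_afu_driver_reserved *pl,
				   int rc)
{
	if (rc)
		pr_debug("AFU driver event not delivered: %d\n", rc);

	/* The buffer belongs to the AFU driver again; kfree(NULL) is a no-op,
	 * which also covers the case where fetch_event() returned NULL. */
	kfree(pl);
}

Any queueing scheme would do here (lists, ring buffers); the point is that the buffer returned by fetch_event() must stay valid until event_delivered() hands it back.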
CXL_EVENT_AFU_INTERRUPT = 1,
CXL_EVENT_DATA_STORAGE = 2,
CXL_EVENT_AFU_ERROR = 3,
+ CXL_EVENT_AFU_DRIVER = 4,
};
struct cxl_event_header {
__u64 error;
};
+struct cxl_event_afu_driver_reserved {
+ /*
+ * Defines the buffer passed to the cxl driver by the AFU driver.
+ *
+ * This is not ABI since the event header.size passed to the user for
+ * existing events is set in the read call to sizeof(cxl_event_header)
+ * + sizeof(whatever event is being dispatched) and the user is already
+ * required to use a 4K buffer on the read call.
+ *
+	 * Of course the contents will be ABI, but that's up to the AFU driver.
+ */
+	__u64 data_size;
+	__u8 data[];
+};
+
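To illustrate how the data_size/data[] pair above is meant to be filled in on the kernel side, here is a hedged sketch of the producer path in the same hypothetical my_afu driver. The my_afu_error_event layout is invented for this example; as the comment above notes, the payload contents are ABI between the AFU driver and its own userspace, not part of the generic cxl ABI.

/* Hypothetical payload layout defined by the AFU driver, not by this patch */
struct my_afu_error_event {
	__u64 flags;
	__u64 error_code;
};

static int my_afu_queue_error(struct my_afu *afu, u64 flags, u64 code)
{
	struct cxl_event_afu_driver_reserved *pl;
	struct my_afu_error_event body = {
		.flags		= flags,
		.error_code	= code,
	};

	/* May be called from the AFU interrupt path, so no sleeping allocation */
	pl = kzalloc(sizeof(*pl) + sizeof(body), GFP_ATOMIC);
	if (!pl)
		return -ENOMEM;

	pl->data_size = sizeof(body);
	memcpy(pl->data, &body, sizeof(body));

	/* Sketch only: replace any undelivered older event rather than leak it.
	 * A real driver would also keep the pending-event count consistent. */
	kfree(xchg(&afu->pending_event, pl));

	cxl_context_events_pending(afu->ctx, 1);
	return 0;
}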
struct cxl_event {
struct cxl_event_header header;
union {
struct cxl_event_afu_interrupt irq;
struct cxl_event_data_storage fault;
struct cxl_event_afu_error afu_error;
+ struct cxl_event_afu_driver_reserved afu_driver_event;
};
};
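Finally, a hedged userspace sketch of how the new event type is consumed through the existing read() interface. As the copy path in afu_driver_event_copy() shows, the driver-defined payload is placed directly after the 8-byte event header and its length is header.size minus the header size; the data_size field of the struct above is a kernel-side descriptor and is not itself copied to userspace. The file-descriptor setup (opening the AFU device, CXL_IOCTL_START_WORK) is omitted.

/* Hypothetical userspace consumer -- not part of this patch */
#include <stdio.h>
#include <unistd.h>
#include <misc/cxl.h>		/* the installed copy of this uapi header */

static int read_one_event(int afu_fd)
{
	__u64 buf[CXL_READ_MIN_SIZE / sizeof(__u64)];	/* 4K, 8-byte aligned */
	struct cxl_event *event = (struct cxl_event *)buf;
	ssize_t len;

	len = read(afu_fd, buf, sizeof(buf));	/* read() requires a 4K buffer */
	if (len < (ssize_t)sizeof(struct cxl_event_header))
		return -1;

	switch (event->header.type) {
	case CXL_EVENT_AFU_INTERRUPT:
		printf("AFU interrupt %u\n", event->irq.irq);
		break;
	case CXL_EVENT_AFU_DRIVER:
		/* Driver-defined payload follows the header; its layout is the
		 * AFU driver's own ABI and is not interpreted here. */
		printf("AFU driver event, %zu payload bytes\n",
		       (size_t)(event->header.size - sizeof(struct cxl_event_header)));
		break;
	default:
		printf("event type %u\n", event->header.type);
		break;
	}
	return 0;
}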