/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"
#include "trace.h"

#define CXL_NUM_MINORS 256 /* Total to reserve */
#define CXL_DEV_MINORS 13  /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */

#define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS)
#define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice))
#define CXL_AFU_MINOR_M(afu) (CXL_AFU_MINOR_D(afu) + 1)
#define CXL_AFU_MINOR_S(afu) (CXL_AFU_MINOR_D(afu) + 2)
#define CXL_AFU_MKDEV_D(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_D(afu))
#define CXL_AFU_MKDEV_M(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_M(afu))
#define CXL_AFU_MKDEV_S(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_S(afu))

#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS)
#define CXL_DEVT_AFU(dev) ((MINOR(dev) % CXL_DEV_MINORS - 1) / 3)

#define CXL_DEVT_IS_CARD(dev) (MINOR(dev) % CXL_DEV_MINORS == 0)
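
/*
 * Worked example of the minor number layout (an illustration derived from
 * the macros above): each adapter owns CXL_DEV_MINORS = 13 consecutive
 * minors, so adapter N starts at 13 * N, with minor 13 * N being the card
 * itself. AFU slice s then gets three minors: dedicated at 13*N + 1 + 3*s,
 * master at 13*N + 2 + 3*s and shared at 13*N + 3 + 3*s. For afu0.1 that
 * is minors 4, 5 and 6, and CXL_DEVT_AFU() inverts the mapping, e.g.
 * ((4 % 13) - 1) / 3 = 1.
 */
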
static dev_t cxl_dev;

static struct class *cxl_class;

static int __afu_open(struct inode *inode, struct file *file, bool master)
{
	struct cxl *adapter;
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
	int slice = CXL_DEVT_AFU(inode->i_rdev);
	int rc = -ENODEV;

	pr_devel("afu_open afu%i.%i\n", slice, adapter_num);

	if (!(adapter = get_cxl_adapter(adapter_num)))
		return -ENODEV;

	if (slice > adapter->slices)
		goto err_put_adapter;

	spin_lock(&adapter->afu_list_lock);
	if (!(afu = adapter->afu[slice])) {
		spin_unlock(&adapter->afu_list_lock);
		goto err_put_adapter;
	}
	/*
	 * Take a ref to the afu so that it doesn't go away for the rest of
	 * the function. This ref is released before we return.
	 */
	cxl_afu_get(afu);
	spin_unlock(&adapter->afu_list_lock);

	if (!afu->current_mode)
		goto err_put_afu;

	if (!cxl_ops->link_ok(adapter, afu)) {
		rc = -EIO;
		goto err_put_afu;
	}

	if (!(ctx = cxl_context_alloc())) {
		rc = -ENOMEM;
		goto err_put_afu;
	}

	if ((rc = cxl_context_init(ctx, afu, master, inode->i_mapping)))
		goto err_put_afu;

	pr_devel("afu_open pe: %i\n", ctx->pe);
	file->private_data = ctx;
	cxl_ctx_get();

	/* indicate success */
	rc = 0;

err_put_afu:
	/* release the ref taken earlier */
	cxl_afu_put(afu);
err_put_adapter:
	put_device(&adapter->dev);
	return rc;
}

int afu_open(struct inode *inode, struct file *file)
{
	return __afu_open(inode, file, false);
}

static int afu_master_open(struct inode *inode, struct file *file)
{
	return __afu_open(inode, file, true);
}

int afu_release(struct inode *inode, struct file *file)
{
	struct cxl_context *ctx = file->private_data;

	pr_devel("%s: closing cxl file descriptor. pe: %i\n",
		 __func__, ctx->pe);
	cxl_context_detach(ctx);

	/*
	 * Delete the context's mapping pointer, unless it's created by the
	 * kernel API, in which case leave it so it can be freed by reclaim_ctx()
	 */
	if (!ctx->kernelapi) {
		mutex_lock(&ctx->mapping_lock);
		ctx->mapping = NULL;
		mutex_unlock(&ctx->mapping_lock);
	}

	/*
	 * At this point all bottom halves have finished and we should be
	 * getting no more IRQs from the hardware for this context. Once it's
	 * removed from the IDR (and RCU synchronised) it's safe to free the
	 * sstp and context.
	 */
	cxl_context_free(ctx);

	return 0;
}

static long afu_ioctl_start_work(struct cxl_context *ctx,
				 struct cxl_ioctl_start_work __user *uwork)
{
	struct cxl_ioctl_start_work work;
	u64 amr = 0;
	int rc;

	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	/* Do this outside the status_mutex to avoid a circular dependency with
	 * the locking in cxl_mmap_fault() */
	if (copy_from_user(&work, uwork,
			   sizeof(struct cxl_ioctl_start_work)))
		return -EFAULT;

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != OPENED) {
		rc = -EIO;
		goto out;
	}

	/*
	 * if any of the reserved fields are set or any of the unused
	 * flags are set it's invalid
	 */
	if (work.reserved1 || work.reserved2 || work.reserved3 ||
	    work.reserved4 || work.reserved5 || work.reserved6 ||
	    (work.flags & ~CXL_START_WORK_ALL)) {
		rc = -EINVAL;
		goto out;
	}

	if (!(work.flags & CXL_START_WORK_NUM_IRQS))
		work.num_interrupts = ctx->afu->pp_irqs;
	else if ((work.num_interrupts < ctx->afu->pp_irqs) ||
		 (work.num_interrupts > ctx->afu->irqs_max)) {
		rc = -EINVAL;
		goto out;
	}
	if ((rc = afu_register_irqs(ctx, work.num_interrupts)))
		goto out;

	if (work.flags & CXL_START_WORK_AMR)
		amr = work.amr & mfspr(SPRN_UAMOR);

	ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);

	/*
	 * Increment the mapped context count for adapter. This also checks
	 * if adapter_context_lock is taken.
	 */
	rc = cxl_adapter_context_get(ctx->afu->adapter);
	if (rc) {
		afu_release_irqs(ctx, ctx);
		goto out;
	}

	/*
	 * We grab the PID here and not in the file open to allow for the case
	 * where a process (master, some daemon, etc) has opened the chardev on
	 * behalf of another process, so the AFU's mm gets bound to the process
	 * that performs this ioctl and not the process that opened the file.
	 * Also we grab the PID of the group leader so that if the task that
	 * has performed the attach operation exits the mm context of the
	 * process is still accessible.
	 */
	ctx->pid = get_task_pid(current, PIDTYPE_PID);
	ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);

	trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);

	if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
							amr))) {
		afu_release_irqs(ctx, ctx);
		cxl_adapter_context_put(ctx->afu->adapter);
		put_pid(ctx->glpid);
		put_pid(ctx->pid);
		ctx->glpid = ctx->pid = NULL;
		goto out;
	}

	ctx->status = STARTED;
	rc = 0;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}

static long afu_ioctl_process_element(struct cxl_context *ctx,
				      __u32 __user *upe)
{
	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	if (copy_to_user(upe, &ctx->external_pe, sizeof(__u32)))
		return -EFAULT;

	return 0;
}

static long afu_ioctl_get_afu_id(struct cxl_context *ctx,
				 struct cxl_afu_id __user *upafuid)
{
	struct cxl_afu_id afuid = { 0 };

	afuid.card_id = ctx->afu->adapter->adapter_num;
	afuid.afu_offset = ctx->afu->slice;
	afuid.afu_mode = ctx->afu->current_mode;

	/* set the flag bit in case the afu is a slave */
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED && !ctx->master)
		afuid.flags |= CXL_AFUID_FLAG_SLAVE;

	if (copy_to_user(upafuid, &afuid, sizeof(afuid)))
		return -EFAULT;

	return 0;
}

long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct cxl_context *ctx = file->private_data;

	if (ctx->status == CLOSED)
		return -EIO;

	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		return -EIO;

	pr_devel("afu_ioctl\n");
	switch (cmd) {
	case CXL_IOCTL_START_WORK:
		return afu_ioctl_start_work(ctx, (struct cxl_ioctl_start_work __user *)arg);
	case CXL_IOCTL_GET_PROCESS_ELEMENT:
		return afu_ioctl_process_element(ctx, (__u32 __user *)arg);
	case CXL_IOCTL_GET_AFU_ID:
		return afu_ioctl_get_afu_id(ctx, (struct cxl_afu_id __user *)
					    arg);
	}
	return -EINVAL;
}

static long afu_compat_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	return afu_ioctl(file, cmd, arg);
}

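/*
 * For illustration only (not part of the driver): a minimal userspace
 * sequence driving the ioctls above, assuming a shared-mode AFU exposed as
 * /dev/cxl/afu0.0s and an AFU-specific work element descriptor `wed`:
 *
 *	struct cxl_ioctl_start_work work = { 0 };
 *	__u32 pe;
 *	int fd = open("/dev/cxl/afu0.0s", O_RDWR);
 *	work.work_element_descriptor = wed;
 *	ioctl(fd, CXL_IOCTL_START_WORK, &work);		// attach to the AFU
 *	ioctl(fd, CXL_IOCTL_GET_PROCESS_ELEMENT, &pe);	// our PE handle
 */
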
int afu_mmap(struct file *file, struct vm_area_struct *vm)
{
	struct cxl_context *ctx = file->private_data;

	/* AFU must be started before we can MMIO */
	if (ctx->status != STARTED)
		return -EIO;

	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		return -EIO;

	return cxl_context_iomap(ctx, vm);
}

static inline bool ctx_event_pending(struct cxl_context *ctx)
{
	if (ctx->pending_irq || ctx->pending_fault || ctx->pending_afu_err)
		return true;

	if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events))
		return true;

	return false;
}

unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
	struct cxl_context *ctx = file->private_data;
	int mask = 0;
	unsigned long flags;

	poll_wait(file, &ctx->wq, poll);

	pr_devel("afu_poll wait done pe: %i\n", ctx->pe);

	spin_lock_irqsave(&ctx->lock, flags);
	if (ctx_event_pending(ctx))
		mask |= POLLIN | POLLRDNORM;
	else if (ctx->status == CLOSED)
		/* Only error on closed when there are no further events pending */
		mask |= POLLERR;
	spin_unlock_irqrestore(&ctx->lock, flags);

	pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask);

	return mask;
}

static ssize_t afu_driver_event_copy(struct cxl_context *ctx,
				     char __user *buf,
				     struct cxl_event *event,
				     struct cxl_event_afu_driver_reserved *pl)
{
	/* Check event */
	if (!pl) {
		ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
		return -EFAULT;
	}

	/* Check event size */
	event->header.size += pl->data_size;
	if (event->header.size > CXL_READ_MIN_SIZE) {
		ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
		return -EFAULT;
	}

	/* Copy event header */
	if (copy_to_user(buf, event, sizeof(struct cxl_event_header))) {
		ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
		return -EFAULT;
	}

	/* Copy event data */
	buf += sizeof(struct cxl_event_header);
	if (copy_to_user(buf, &pl->data, pl->data_size)) {
		ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
		return -EFAULT;
	}

	ctx->afu_driver_ops->event_delivered(ctx, pl, 0); /* Success */
	return event->header.size;
}

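/*
 * Size accounting example (illustrative): a driver event carrying a
 * 24-byte payload is delivered as an 8-byte cxl_event_header followed by
 * the payload, so header.size = 8 + 24 = 32 bytes are copied to userspace,
 * well under the CXL_READ_MIN_SIZE cap enforced above.
 */
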
ssize_t afu_read(struct file *file, char __user *buf, size_t count,
		 loff_t *off)
{
	struct cxl_context *ctx = file->private_data;
	struct cxl_event_afu_driver_reserved *pl = NULL;
	struct cxl_event event;
	unsigned long flags;
	int rc;
	DEFINE_WAIT(wait);

	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		return -EIO;

	if (count < CXL_READ_MIN_SIZE)
		return -EINVAL;

	spin_lock_irqsave(&ctx->lock, flags);

	for (;;) {
		prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE);
		if (ctx_event_pending(ctx) || (ctx->status == CLOSED))
			break;

		if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
			rc = -EIO;
			goto out;
		}

		if (file->f_flags & O_NONBLOCK) {
			rc = -EAGAIN;
			goto out;
		}

		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			goto out;
		}

		spin_unlock_irqrestore(&ctx->lock, flags);
		pr_devel("afu_read going to sleep...\n");
		schedule();
		pr_devel("afu_read woken up\n");
		spin_lock_irqsave(&ctx->lock, flags);
	}

	finish_wait(&ctx->wq, &wait);

	memset(&event, 0, sizeof(event));
	event.header.process_element = ctx->pe;
	event.header.size = sizeof(struct cxl_event_header);
	if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) {
		pr_devel("afu_read delivering AFU driver specific event\n");
		pl = ctx->afu_driver_ops->fetch_event(ctx);
		atomic_dec(&ctx->afu_driver_events);
		event.header.type = CXL_EVENT_AFU_DRIVER;
	} else if (ctx->pending_irq) {
		pr_devel("afu_read delivering AFU interrupt\n");
		event.header.size += sizeof(struct cxl_event_afu_interrupt);
		event.header.type = CXL_EVENT_AFU_INTERRUPT;
		event.irq.irq = find_first_bit(ctx->irq_bitmap, ctx->irq_count) + 1;
		clear_bit(event.irq.irq - 1, ctx->irq_bitmap);
		if (bitmap_empty(ctx->irq_bitmap, ctx->irq_count))
			ctx->pending_irq = false;
	} else if (ctx->pending_fault) {
		pr_devel("afu_read delivering data storage fault\n");
		event.header.size += sizeof(struct cxl_event_data_storage);
		event.header.type = CXL_EVENT_DATA_STORAGE;
		event.fault.addr = ctx->fault_addr;
		event.fault.dsisr = ctx->fault_dsisr;
		ctx->pending_fault = false;
	} else if (ctx->pending_afu_err) {
		pr_devel("afu_read delivering afu error\n");
		event.header.size += sizeof(struct cxl_event_afu_error);
		event.header.type = CXL_EVENT_AFU_ERROR;
		event.afu_error.error = ctx->afu_err;
		ctx->pending_afu_err = false;
	} else if (ctx->status == CLOSED) {
		pr_devel("afu_read fatal error\n");
		spin_unlock_irqrestore(&ctx->lock, flags);
		return -EIO;
	} else
		WARN(1, "afu_read must be buggy\n");

	spin_unlock_irqrestore(&ctx->lock, flags);

	if (event.header.type == CXL_EVENT_AFU_DRIVER)
		return afu_driver_event_copy(ctx, buf, &event, pl);

	if (copy_to_user(buf, &event, event.header.size))
		return -EFAULT;
	return event.header.size;

out:
	finish_wait(&ctx->wq, &wait);
	spin_unlock_irqrestore(&ctx->lock, flags);
	return rc;
}

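/*
 * For illustration only (not part of the driver): userspace must pass a
 * buffer of at least CXL_READ_MIN_SIZE (4K) and receives one event per
 * read() call, e.g.:
 *
 *	char buf[4096];
 *	struct cxl_event *ev = (struct cxl_event *)buf;
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	if (n > 0 && ev->header.type == CXL_EVENT_AFU_INTERRUPT)
 *		service_interrupt(ev->irq.irq);	// app-defined handler
 */
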
/*
 * Note: if this is updated, we need to update api.c to patch the new ones in
 * too
 */
const struct file_operations afu_fops = {
	.owner		= THIS_MODULE,
	.open		= afu_open,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.unlocked_ioctl	= afu_ioctl,
	.compat_ioctl	= afu_compat_ioctl,
	.mmap		= afu_mmap,
};

static const struct file_operations afu_master_fops = {
	.owner		= THIS_MODULE,
	.open		= afu_master_open,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.unlocked_ioctl	= afu_ioctl,
	.compat_ioctl	= afu_compat_ioctl,
	.mmap		= afu_mmap,
};

static char *cxl_devnode(struct device *dev, umode_t *mode)
{
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    CXL_DEVT_IS_CARD(dev->devt)) {
		/*
		 * These minor numbers will eventually be used to program the
		 * PSL and AFUs once we have dynamic reprogramming support
		 */
		return NULL;
	}
	return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}

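/*
 * With the "cxl/%s" devnode callback above, udev places the nodes under a
 * /dev/cxl directory, e.g. /dev/cxl/afu0.0m and /dev/cxl/afu0.0s for
 * master and shared access to slice 0 of card 0 (an illustration; the
 * exact names come from the device_create() call below).
 */
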
extern struct class *cxl_class;

static int cxl_add_chardev(struct cxl_afu *afu, dev_t devt, struct cdev *cdev,
			   struct device **chardev, char *postfix, char *desc,
			   const struct file_operations *fops)
{
	struct device *dev;
	int rc;

	cdev_init(cdev, fops);
	if ((rc = cdev_add(cdev, devt, 1))) {
		dev_err(&afu->dev, "Unable to add %s chardev: %i\n", desc, rc);
		return rc;
	}

	dev = device_create(cxl_class, &afu->dev, devt, afu,
			    "afu%i.%i%s", afu->adapter->adapter_num, afu->slice, postfix);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		dev_err(&afu->dev, "Unable to create %s chardev in sysfs: %i\n", desc, rc);
		goto err;
	}

	*chardev = dev;

	return 0;
err:
	cdev_del(cdev);
	return rc;
}

int cxl_chardev_d_afu_add(struct cxl_afu *afu)
{
	return cxl_add_chardev(afu, CXL_AFU_MKDEV_D(afu), &afu->afu_cdev_d,
			       &afu->chardev_d, "d", "dedicated",
			       &afu_master_fops); /* Uses master fops */
}

int cxl_chardev_m_afu_add(struct cxl_afu *afu)
{
	return cxl_add_chardev(afu, CXL_AFU_MKDEV_M(afu), &afu->afu_cdev_m,
			       &afu->chardev_m, "m", "master",
			       &afu_master_fops);
}

int cxl_chardev_s_afu_add(struct cxl_afu *afu)
{
	return cxl_add_chardev(afu, CXL_AFU_MKDEV_S(afu), &afu->afu_cdev_s,
			       &afu->chardev_s, "s", "shared",
			       &afu_fops);
}

void cxl_chardev_afu_remove(struct cxl_afu *afu)
{
	if (afu->chardev_d) {
		cdev_del(&afu->afu_cdev_d);
		device_unregister(afu->chardev_d);
		afu->chardev_d = NULL;
	}
	if (afu->chardev_m) {
		cdev_del(&afu->afu_cdev_m);
		device_unregister(afu->chardev_m);
		afu->chardev_m = NULL;
	}
	if (afu->chardev_s) {
		cdev_del(&afu->afu_cdev_s);
		device_unregister(afu->chardev_s);
		afu->chardev_s = NULL;
	}
}

int cxl_register_afu(struct cxl_afu *afu)
{
	afu->dev.class = cxl_class;

	return device_register(&afu->dev);
}

int cxl_register_adapter(struct cxl *adapter)
{
	adapter->dev.class = cxl_class;

	/*
	 * Future: When we support dynamically reprogramming the PSL & AFU we
	 * will expose the interface to do that via a chardev:
	 * adapter->dev.devt = CXL_CARD_MKDEV(adapter);
	 */

	return device_register(&adapter->dev);
}

dev_t cxl_get_dev(void)
{
	return cxl_dev;
}

int __init cxl_file_init(void)
{
	int rc;

	/*
	 * If these change we really need to update API. Either change some
	 * flags or update API version number CXL_API_VERSION.
	 */
	BUILD_BUG_ON(CXL_API_VERSION != 3);
	BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64);
	BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8);
	BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8);
	BUILD_BUG_ON(sizeof(struct cxl_event_data_storage) != 32);
	BUILD_BUG_ON(sizeof(struct cxl_event_afu_error) != 16);

	if ((rc = alloc_chrdev_region(&cxl_dev, 0, CXL_NUM_MINORS, "cxl"))) {
		pr_err("Unable to allocate CXL major number: %i\n", rc);
		return rc;
	}

	pr_devel("CXL device allocated, MAJOR %i\n", MAJOR(cxl_dev));

	cxl_class = class_create(THIS_MODULE, "cxl");
	if (IS_ERR(cxl_class)) {
		pr_err("Unable to create CXL class\n");
		rc = PTR_ERR(cxl_class);
		goto err;
	}
	cxl_class->devnode = cxl_devnode;

	return 0;

err:
	unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
	return rc;
}

void cxl_file_exit(void)
{
	unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
	class_destroy(cxl_class);
}