// SPDX-License-Identifier: GPL-2.0
/*
 * cdev.c - Character device component for Mostcore
 *
 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
#include <linux/most.h>
#define CHRDEV_REGION_SIZE 50
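
/*
 * Component-global state: the reserved character device region, the IDA
 * that hands out minor numbers, the device class and the most_component
 * descriptor registered with the mostcore.
 */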
static struct cdev_component {
	dev_t devno;
	struct ida minor_id;
	unsigned int major;
	struct class *class;
	struct most_component cc;
} comp;

struct comp_channel {
	wait_queue_head_t wq;
	spinlock_t unlink;	/* synchronization lock to unlink channels */
	struct cdev cdev;
	struct device *dev;
	struct mutex io_mutex;
	struct most_interface *iface;
	struct most_channel_config *cfg;
	unsigned int channel_id;
	dev_t devno;
	size_t mbo_offs;
	DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
	int access_ref;
	struct list_head list;
};
#define to_channel(d) container_of(d, struct comp_channel, cdev)
static LIST_HEAD(channel_list);
static DEFINE_SPINLOCK(ch_list_lock);
static inline bool ch_has_mbo(struct comp_channel *c)
{
	return channel_has_mbo(c->iface, c->channel_id, &comp.cc) > 0;
}
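
/*
 * Get hold of an MBO for writing: reuse the one already waiting in the fifo
 * or request a fresh one from the core and queue it.
 */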
static inline struct mbo *ch_get_mbo(struct comp_channel *c, struct mbo **mbo)
{
	if (!kfifo_peek(&c->fifo, mbo)) {
		*mbo = most_get_mbo(c->iface, c->channel_id, &comp.cc);
		if (*mbo)
			kfifo_in(&c->fifo, mbo, 1);
	}
	return *mbo;
}
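
/*
 * Look up the channel object that was created for the given interface and
 * channel ID; returns NULL if no such channel is registered.
 */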
static struct comp_channel *get_channel(struct most_interface *iface, int id)
{
	struct comp_channel *c, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ch_list_lock, flags);
	list_for_each_entry_safe(c, tmp, &channel_list, list) {
		if ((c->iface == iface) && (c->channel_id == id)) {
			spin_unlock_irqrestore(&ch_list_lock, flags);
			return c;
		}
	}
	spin_unlock_irqrestore(&ch_list_lock, flags);
	return NULL;
}
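
/* Return all MBOs still held in the fifo to the core and stop the channel. */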
static void stop_channel(struct comp_channel *c)
{
	struct mbo *mbo;

	while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
		most_put_mbo(mbo);
	most_stop_channel(c->iface, c->channel_id, &comp.cc);
}
static void destroy_cdev(struct comp_channel *c)
{
	unsigned long flags;

	device_destroy(comp.class, c->devno);
	cdev_del(&c->cdev);
	spin_lock_irqsave(&ch_list_lock, flags);
	list_del(&c->list);
	spin_unlock_irqrestore(&ch_list_lock, flags);
}
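
/* Release the minor number, the MBO fifo and the channel object itself. */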
static void destroy_channel(struct comp_channel *c)
{
	ida_simple_remove(&comp.minor_id, MINOR(c->devno));
	kfifo_free(&c->fifo);
	kfree(c);
}
/**
 * comp_open - implements the syscall to open the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stores the channel pointer in the private data field of
 * the file structure and activates the channel within the core.
 */
static int comp_open(struct inode *inode, struct file *filp)
{
	struct comp_channel *c;
	int ret;

	c = to_channel(inode->i_cdev);
	filp->private_data = c;

	if (((c->cfg->direction == MOST_CH_RX) &&
	     ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
	    ((c->cfg->direction == MOST_CH_TX) &&
	     ((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
		return -EACCES;
	}

	mutex_lock(&c->io_mutex);
	if (!c->dev) {
		mutex_unlock(&c->io_mutex);
		return -ENODEV;
	}

	if (c->access_ref) {
		mutex_unlock(&c->io_mutex);
		return -EBUSY;
	}

	c->mbo_offs = 0;
	ret = most_start_channel(c->iface, c->channel_id, &comp.cc);
	if (!ret)
		c->access_ref = 1;
	mutex_unlock(&c->io_mutex);
	return ret;
}
/**
 * comp_close - implements the syscall to close the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stops the channel within the core.
 */
static int comp_close(struct inode *inode, struct file *filp)
{
	struct comp_channel *c = to_channel(inode->i_cdev);

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->access_ref = 0;
	spin_unlock(&c->unlink);
	if (c->dev) {
		stop_channel(c);
		mutex_unlock(&c->io_mutex);
	} else {
		mutex_unlock(&c->io_mutex);
		destroy_channel(c);
	}
	return 0;
}
/**
 * comp_write - implements the syscall to write to the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to write
 * @offset: offset from where to start writing
 */
static ssize_t comp_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *offset)
{
	int ret;
	size_t to_copy, left;
	struct mbo *mbo = NULL;
	struct comp_channel *c = filp->private_data;

	mutex_lock(&c->io_mutex);
	while (c->dev && !ch_get_mbo(c, &mbo)) {
		mutex_unlock(&c->io_mutex);

		if ((filp->f_flags & O_NONBLOCK))
			return -EAGAIN;
		if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}

	if (unlikely(!c->dev)) {
		ret = -ENODEV;
		goto unlock;
	}

	to_copy = min(count, c->cfg->buffer_size - c->mbo_offs);
	left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy);
	if (left == to_copy) {
		ret = -EFAULT;
		goto unlock;
	}

	c->mbo_offs += to_copy - left;
	if (c->mbo_offs >= c->cfg->buffer_size ||
	    c->cfg->data_type == MOST_CH_CONTROL ||
	    c->cfg->data_type == MOST_CH_ASYNC) {
		kfifo_skip(&c->fifo);
		mbo->buffer_length = c->mbo_offs;
		c->mbo_offs = 0;
		most_submit_mbo(mbo);
	}

	ret = to_copy - left;
unlock:
	mutex_unlock(&c->io_mutex);
	return ret;
}
/**
 * comp_read - implements the syscall to read from the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to read
 * @offset: offset from where to start reading
 */
static ssize_t
comp_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
	size_t to_copy, not_copied, copied;
	struct mbo *mbo = NULL;
	struct comp_channel *c = filp->private_data;

	mutex_lock(&c->io_mutex);
	while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
		mutex_unlock(&c->io_mutex);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		if (wait_event_interruptible(c->wq,
					     (!kfifo_is_empty(&c->fifo) ||
					      (!c->dev))))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}

	/* make sure we don't submit to gone devices */
	if (unlikely(!c->dev)) {
		mutex_unlock(&c->io_mutex);
		return -ENODEV;
	}

	to_copy = min_t(size_t,
			count,
			mbo->processed_length - c->mbo_offs);

	not_copied = copy_to_user(buf,
				  mbo->virt_address + c->mbo_offs,
				  to_copy);

	copied = to_copy - not_copied;

	c->mbo_offs += copied;
	if (c->mbo_offs >= mbo->processed_length) {
		kfifo_skip(&c->fifo);
		most_put_mbo(mbo);
		c->mbo_offs = 0;
	}
	mutex_unlock(&c->io_mutex);
	return copied;
}
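
/*
 * comp_poll - poll/select support
 *
 * An RX channel is reported readable as soon as an MBO is waiting in the
 * fifo; a TX channel is reported writable as long as an MBO is available
 * for writing. Both directions also signal readiness when the device has
 * gone so that blocked callers can bail out with an error.
 */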
static __poll_t comp_poll(struct file *filp, poll_table *wait)
{
	struct comp_channel *c = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &c->wq, wait);

	mutex_lock(&c->io_mutex);
	if (c->cfg->direction == MOST_CH_RX) {
		if (!c->dev || !kfifo_is_empty(&c->fifo))
			mask |= EPOLLIN | EPOLLRDNORM;
	} else {
		if (!c->dev || !kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
			mask |= EPOLLOUT | EPOLLWRNORM;
	}
	mutex_unlock(&c->io_mutex);
	return mask;
}
/*
 * Initialization of struct file_operations
 */
static const struct file_operations channel_fops = {
	.owner = THIS_MODULE,
	.read = comp_read,
	.write = comp_write,
	.open = comp_open,
	.release = comp_close,
	.poll = comp_poll,
};
/**
 * comp_disconnect_channel - disconnect a channel
 * @iface: pointer to interface instance
 * @channel_id: channel index
 *
 * This frees allocated memory and removes the cdev that represents this
 * channel in user space.
 */
static int comp_disconnect_channel(struct most_interface *iface, int channel_id)
{
	struct comp_channel *c;

	c = get_channel(iface, channel_id);
	if (!c)
		return -EINVAL;

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->dev = NULL;
	spin_unlock(&c->unlink);
	destroy_cdev(c);
	if (c->access_ref) {
		stop_channel(c);
		wake_up_interruptible(&c->wq);
		mutex_unlock(&c->io_mutex);
	} else {
		mutex_unlock(&c->io_mutex);
		destroy_channel(c);
	}
	return 0;
}
/**
 * comp_rx_completion - completion handler for rx channels
 * @mbo: pointer to buffer object that has completed
 *
 * This searches for the channel linked to this MBO and stores it in the
 * local fifo buffer.
 */
static int comp_rx_completion(struct mbo *mbo)
{
	struct comp_channel *c;

	if (!mbo)
		return -EINVAL;

	c = get_channel(mbo->ifp, mbo->hdm_channel_id);
	if (!c)
		return -ENXIO;

	spin_lock(&c->unlink);
	if (!c->access_ref || !c->dev) {
		spin_unlock(&c->unlink);
		return -ENODEV;
	}
	kfifo_in(&c->fifo, &mbo, 1);
	spin_unlock(&c->unlink);
#ifdef DEBUG_MESG
	if (kfifo_is_full(&c->fifo))
		dev_warn(c->dev, "Fifo is full\n");
#endif
	wake_up_interruptible(&c->wq);
	return 0;
}
/**
 * comp_tx_completion - completion handler for tx channels
 * @iface: pointer to interface instance
 * @channel_id: channel index/ID
 *
 * This wakes sleeping processes in the wait-queue.
 */
static int comp_tx_completion(struct most_interface *iface, int channel_id)
{
	struct comp_channel *c;

	c = get_channel(iface, channel_id);
	if (!c)
		return -ENXIO;

	if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
		dev_warn(c->dev, "Channel ID out of range\n");
		return -EINVAL;
	}

	wake_up_interruptible(&c->wq);
	return 0;
}
/**
 * comp_probe - probe function of the driver module
 * @iface: pointer to interface instance
 * @channel_id: channel index/ID
 * @cfg: pointer to actual channel configuration
 * @name: name of the device to be created
 * @args: pointer to component parameters (unused by this component)
 *
 * This allocates a channel object and creates the device node in /dev
 *
 * Returns 0 on success or error code otherwise.
 */
static int comp_probe(struct most_interface *iface, int channel_id,
		      struct most_channel_config *cfg, char *name, char *args)
{
	struct comp_channel *c;
	unsigned long cl_flags;
	int retval;
	int current_minor;

	if (!cfg || !name)
		return -EINVAL;

	c = get_channel(iface, channel_id);
	if (c)
		return -EEXIST;

	current_minor = ida_simple_get(&comp.minor_id, 0, 0, GFP_KERNEL);
	if (current_minor < 0)
		return current_minor;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		retval = -ENOMEM;
		goto err_remove_ida;
	}

	c->devno = MKDEV(comp.major, current_minor);
	cdev_init(&c->cdev, &channel_fops);
	c->cdev.owner = THIS_MODULE;
	retval = cdev_add(&c->cdev, c->devno, 1);
	if (retval < 0)
		goto err_free_c;
	c->iface = iface;
	c->cfg = cfg;
	c->channel_id = channel_id;
	c->access_ref = 0;
	spin_lock_init(&c->unlink);
	INIT_KFIFO(c->fifo);
	retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
	if (retval)
		goto err_del_cdev_and_free_channel;
	init_waitqueue_head(&c->wq);
	mutex_init(&c->io_mutex);
	spin_lock_irqsave(&ch_list_lock, cl_flags);
	list_add_tail(&c->list, &channel_list);
	spin_unlock_irqrestore(&ch_list_lock, cl_flags);
	c->dev = device_create(comp.class, NULL, c->devno, NULL, "%s", name);

	if (IS_ERR(c->dev)) {
		retval = PTR_ERR(c->dev);
		goto err_free_kfifo_and_del_list;
	}
	kobject_uevent(&c->dev->kobj, KOBJ_ADD);
	return 0;

err_free_kfifo_and_del_list:
	kfifo_free(&c->fifo);
	list_del(&c->list);
err_del_cdev_and_free_channel:
	cdev_del(&c->cdev);
err_free_c:
	kfree(c);
err_remove_ida:
	ida_simple_remove(&comp.minor_id, current_minor);
	return retval;
}
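
/*
 * Callbacks the mostcore invokes on this component: channel probe and
 * disconnect plus RX/TX completion handling.
 */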
static struct cdev_component comp = {
	.cc = {
		.mod = THIS_MODULE,
		.name = "cdev",
		.probe_channel = comp_probe,
		.disconnect_channel = comp_disconnect_channel,
		.rx_completion = comp_rx_completion,
		.tx_completion = comp_tx_completion,
	},
};
static int __init most_cdev_init(void)
{
	int err;

	comp.class = class_create(THIS_MODULE, "most_cdev");
	if (IS_ERR(comp.class))
		return PTR_ERR(comp.class);

	ida_init(&comp.minor_id);

	err = alloc_chrdev_region(&comp.devno, 0, CHRDEV_REGION_SIZE, "cdev");
	if (err < 0)
		goto dest_ida;
	comp.major = MAJOR(comp.devno);
	err = most_register_component(&comp.cc);
	if (err)
		goto free_cdev;
	err = most_register_configfs_subsys(&comp.cc);
	if (err)
		goto deregister_comp;
	return 0;

deregister_comp:
	most_deregister_component(&comp.cc);
free_cdev:
	unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
dest_ida:
	ida_destroy(&comp.minor_id);
	class_destroy(comp.class);
	return err;
}
static void __exit most_cdev_exit(void)
{
	struct comp_channel *c, *tmp;

	most_deregister_configfs_subsys(&comp.cc);
	most_deregister_component(&comp.cc);

	list_for_each_entry_safe(c, tmp, &channel_list, list) {
		destroy_cdev(c);
		destroy_channel(c);
	}
	unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
	ida_destroy(&comp.minor_id);
	class_destroy(comp.class);
}
module_init(most_cdev_init);
module_exit(most_cdev_exit);
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("character device component for mostcore");