static struct aim_channel *get_channel(struct most_interface *iface, int id)
{
- struct aim_channel *channel, *tmp;
+ struct aim_channel *c, *tmp;
unsigned long flags;
int found_channel = 0;
spin_lock_irqsave(&ch_list_lock, flags);
- list_for_each_entry_safe(channel, tmp, &channel_list, list) {
- if ((channel->iface == iface) && (channel->channel_id == id)) {
+ list_for_each_entry_safe(c, tmp, &channel_list, list) {
+ if ((c->iface == iface) && (c->channel_id == id)) {
found_channel = 1;
break;
}
@@ ... @@
spin_unlock_irqrestore(&ch_list_lock, flags);
if (!found_channel)
return NULL;
- return channel;
+ return c;
}
static void stop_channel(struct aim_channel *c)
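Aside on the pattern in get_channel() above: once list_for_each_entry_safe() runs to completion without a break, the cursor no longer points at a real entry, which is why the code tracks a hit in found_channel instead of testing the cursor. A minimal sketch of the same lock-guarded lookup, with illustrative names (my_node, my_list, my_lock) rather than the driver's:

	struct my_node {
		int id;
		struct list_head list;
	};

	static LIST_HEAD(my_list);
	static DEFINE_SPINLOCK(my_lock);

	/* Walk the list under the lock; remember the hit, since the
	 * cursor is only meaningful if we broke out of the loop. */
	static struct my_node *my_find(int id)
	{
		struct my_node *n, *found = NULL;
		unsigned long flags;

		spin_lock_irqsave(&my_lock, flags);
		list_for_each_entry(n, &my_list, list) {
			if (n->id == id) {
				found = n;
				break;
			}
		}
		spin_unlock_irqrestore(&my_lock, flags);
		return found;
	}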
@@ ... @@
*/
static int aim_open(struct inode *inode, struct file *filp)
{
- struct aim_channel *channel;
+ struct aim_channel *c;
int ret;
- channel = to_channel(inode->i_cdev);
- filp->private_data = channel;
+ c = to_channel(inode->i_cdev);
+ filp->private_data = c;
- if (((channel->cfg->direction == MOST_CH_RX) &&
+ if (((c->cfg->direction == MOST_CH_RX) &&
((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
- ((channel->cfg->direction == MOST_CH_TX) &&
+ ((c->cfg->direction == MOST_CH_TX) &&
((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
pr_info("WARN: Access flags mismatch\n");
return -EACCES;
}
- if (!atomic_inc_and_test(&channel->access_ref)) {
+ if (!atomic_inc_and_test(&c->access_ref)) {
pr_info("WARN: Device is busy\n");
- atomic_dec(&channel->access_ref);
+ atomic_dec(&c->access_ref);
return -EBUSY;
}
- ret = most_start_channel(channel->iface, channel->channel_id,
+ ret = most_start_channel(c->iface, c->channel_id,
&cdev_aim);
if (ret)
- atomic_dec(&channel->access_ref);
+ atomic_dec(&c->access_ref);
return ret;
}
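The busy check in aim_open() relies on access_ref being seeded with -1 (see aim_probe further down): atomic_inc_and_test() returns true only for the -1 to 0 transition, so exactly one opener gets through and every loser decrements back. Reduced to the bare idiom, with try_open()/do_release() as illustrative names, not driver functions:

	static atomic_t access_ref = ATOMIC_INIT(-1);	/* -1: nobody holds the device */

	static int try_open(void)
	{
		/* only the -1 -> 0 transition makes inc-and-test return true */
		if (!atomic_inc_and_test(&access_ref)) {
			atomic_dec(&access_ref);	/* undo our increment, back off */
			return -EBUSY;
		}
		return 0;
	}

	static void do_release(void)
	{
		atomic_dec(&access_ref);	/* back to -1: device is free again */
	}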
@@ ... @@
*/
static int aim_close(struct inode *inode, struct file *filp)
{
- struct aim_channel *channel = to_channel(inode->i_cdev);
-
- mutex_lock(&channel->io_mutex);
- if (!channel->dev) {
- mutex_unlock(&channel->io_mutex);
- atomic_dec(&channel->access_ref);
- destroy_cdev(channel);
- kfree(channel);
+ struct aim_channel *c = to_channel(inode->i_cdev);
+
+ mutex_lock(&c->io_mutex);
+ if (!c->dev) {
+ mutex_unlock(&c->io_mutex);
+ atomic_dec(&c->access_ref);
+ destroy_cdev(c);
+ kfree(c);
return 0;
}
- mutex_unlock(&channel->io_mutex);
+ mutex_unlock(&c->io_mutex);
- stop_channel(channel);
- atomic_dec(&channel->access_ref);
+ stop_channel(c);
+ atomic_dec(&c->access_ref);
return 0;
}
@@ ... @@
size_t max_len = 0;
ssize_t retval;
struct mbo *mbo;
- struct aim_channel *channel = filp->private_data;
+ struct aim_channel *c = filp->private_data;
- mutex_lock(&channel->io_mutex);
- if (unlikely(!channel->dev)) {
- mutex_unlock(&channel->io_mutex);
+ mutex_lock(&c->io_mutex);
+ if (unlikely(!c->dev)) {
+ mutex_unlock(&c->io_mutex);
return -EPIPE;
}
- mutex_unlock(&channel->io_mutex);
+ mutex_unlock(&c->io_mutex);
- mbo = most_get_mbo(channel->iface, channel->channel_id, &cdev_aim);
+ mbo = most_get_mbo(c->iface, c->channel_id, &cdev_aim);
if (!mbo) {
if ((filp->f_flags & O_NONBLOCK))
return -EAGAIN;
if (wait_event_interruptible(
- channel->wq,
- (mbo = most_get_mbo(channel->iface,
- channel->channel_id,
+ c->wq,
+ (mbo = most_get_mbo(c->iface,
+ c->channel_id,
&cdev_aim)) ||
- (!channel->dev)))
+ (!c->dev)))
return -ERESTARTSYS;
}
- mutex_lock(&channel->io_mutex);
- if (unlikely(!channel->dev)) {
- mutex_unlock(&channel->io_mutex);
+ mutex_lock(&c->io_mutex);
+ if (unlikely(!c->dev)) {
+ mutex_unlock(&c->io_mutex);
err = -EPIPE;
goto error;
}
- mutex_unlock(&channel->io_mutex);
+ mutex_unlock(&c->io_mutex);
- max_len = channel->cfg->buffer_size;
+ max_len = c->cfg->buffer_size;
actual_len = min(count, max_len);
mbo->buffer_length = actual_len;
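aim_write() acquires its buffer with the usual try/sleep/retry shape: one non-blocking attempt, an early -EAGAIN for O_NONBLOCK callers, then an interruptible sleep that is also woken on disconnect. A sketch of that shape, assuming placeholder helpers (mbo_try_get(), dev_gone()) and a local wait queue that are not part of the MOST API:

	static struct mbo *mbo_try_get(void);	/* placeholder: one non-blocking attempt */
	static bool dev_gone(void);		/* placeholder: true once disconnected */
	static DECLARE_WAIT_QUEUE_HEAD(wq);

	static struct mbo *acquire_mbo(struct file *filp)
	{
		struct mbo *mbo = mbo_try_get();

		if (!mbo) {
			if (filp->f_flags & O_NONBLOCK)
				return ERR_PTR(-EAGAIN);	/* caller refuses to sleep */
			/* sleep until a buffer appears or the device disappears */
			if (wait_event_interruptible(wq, (mbo = mbo_try_get()) ||
						     dev_gone()))
				return ERR_PTR(-ERESTARTSYS);	/* hit by a signal */
			if (dev_gone())
				return ERR_PTR(-EPIPE);		/* woken by disconnect */
		}
		return mbo;
	}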
@@ ... @@
{
size_t to_copy, not_copied, copied;
struct mbo *mbo;
- struct aim_channel *channel = filp->private_data;
+ struct aim_channel *c = filp->private_data;
- if (channel->stacked_mbo) {
- mbo = channel->stacked_mbo;
+ if (c->stacked_mbo) {
+ mbo = c->stacked_mbo;
goto start_copy;
}
- while ((!kfifo_out(&channel->fifo, &mbo, 1)) && (channel->dev)) {
+ while ((!kfifo_out(&c->fifo, &mbo, 1)) && (c->dev)) {
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
- if (wait_event_interruptible(channel->wq,
- (!kfifo_is_empty(&channel->fifo) ||
- (!channel->dev))))
+ if (wait_event_interruptible(c->wq,
+ (!kfifo_is_empty(&c->fifo) ||
+ (!c->dev))))
return -ERESTARTSYS;
}
- channel->stacked_mbo = mbo;
+ c->stacked_mbo = mbo;
start_copy:
/* make sure we don't submit to gone devices */
- mutex_lock(&channel->io_mutex);
- if (unlikely(!channel->dev)) {
- mutex_unlock(&channel->io_mutex);
+ mutex_lock(&c->io_mutex);
+ if (unlikely(!c->dev)) {
+ mutex_unlock(&c->io_mutex);
return -EIO;
}
to_copy = min_t(size_t,
count,
- mbo->processed_length - channel->mbo_offs);
+ mbo->processed_length - c->mbo_offs);
not_copied = copy_to_user(buf,
- mbo->virt_address + channel->mbo_offs,
+ mbo->virt_address + c->mbo_offs,
to_copy);
copied = to_copy - not_copied;
- channel->mbo_offs += copied;
- if (channel->mbo_offs >= mbo->processed_length) {
+ c->mbo_offs += copied;
+ if (c->mbo_offs >= mbo->processed_length) {
most_put_mbo(mbo);
- channel->mbo_offs = 0;
- channel->stacked_mbo = NULL;
+ c->mbo_offs = 0;
+ c->stacked_mbo = NULL;
}
- mutex_unlock(&channel->io_mutex);
+ mutex_unlock(&c->io_mutex);
return copied;
}
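Worth noting in aim_read(): a buffer that userspace does not drain in one call is parked in stacked_mbo, with mbo_offs marking where the next read() resumes; only a fully consumed mbo goes back via most_put_mbo(). The bookkeeping in isolation (rd_state is an illustrative struct, not the driver's aim_channel):

	struct rd_state {
		struct mbo *stacked_mbo;	/* partially drained buffer, if any */
		unsigned int mbo_offs;		/* offset of the next byte for userspace */
	};

	static void rd_advance(struct rd_state *st, struct mbo *mbo, size_t copied)
	{
		st->mbo_offs += copied;
		if (st->mbo_offs >= mbo->processed_length) {
			most_put_mbo(mbo);	/* fully drained: return it to the core */
			st->mbo_offs = 0;
			st->stacked_mbo = NULL;
		}
		/* otherwise keep it stacked; the next read() resumes at mbo_offs */
	}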
@@ ... @@
*/
static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
{
- struct aim_channel *channel;
+ struct aim_channel *c;
if (!iface) {
pr_info("Bad interface pointer\n");
return -EINVAL;
}
- channel = get_channel(iface, channel_id);
- if (!channel)
+ c = get_channel(iface, channel_id);
+ if (!c)
return -ENXIO;
- mutex_lock(&channel->io_mutex);
- channel->dev = NULL;
- mutex_unlock(&channel->io_mutex);
+ mutex_lock(&c->io_mutex);
+ c->dev = NULL;
+ mutex_unlock(&c->io_mutex);
- if (atomic_read(&channel->access_ref)) {
- destroy_cdev(channel);
- kfree(channel);
+ if (atomic_read(&c->access_ref)) {
+ destroy_cdev(c);
+ kfree(c);
} else {
- wake_up_interruptible(&channel->wq);
+ wake_up_interruptible(&c->wq);
}
return 0;
}
@@ ... @@
*/
static int aim_rx_completion(struct mbo *mbo)
{
- struct aim_channel *channel;
+ struct aim_channel *c;
if (!mbo)
return -EINVAL;
- channel = get_channel(mbo->ifp, mbo->hdm_channel_id);
- if (!channel)
+ c = get_channel(mbo->ifp, mbo->hdm_channel_id);
+ if (!c)
return -ENXIO;
- kfifo_in(&channel->fifo, &mbo, 1);
+ kfifo_in(&c->fifo, &mbo, 1);
#ifdef DEBUG_MESG
- if (kfifo_is_full(&channel->fifo))
+ if (kfifo_is_full(&c->fifo))
pr_info("WARN: Fifo is full\n");
#endif
- wake_up_interruptible(&channel->wq);
+ wake_up_interruptible(&c->wq);
return 0;
}
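aim_rx_completion() is the producer half of the fifo that aim_read() consumes: it stores the mbo pointer (not the payload) and wakes any reader sleeping on the wait queue. The pairing, reduced to a sketch using the driver's own calls:

	/* producer, as in aim_rx_completion(): queue the pointer, wake the reader */
	kfifo_in(&c->fifo, &mbo, 1);		/* stores the mbo pointer, not the payload */
	wake_up_interruptible(&c->wq);		/* unblocks a reader sleeping in aim_read() */

	/* consumer, as in aim_read() (signal handling elided in this sketch): */
	while (!kfifo_out(&c->fifo, &mbo, 1) && c->dev)
		wait_event_interruptible(c->wq,
					 !kfifo_is_empty(&c->fifo) || !c->dev);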
@@ ... @@
*/
static int aim_tx_completion(struct most_interface *iface, int channel_id)
{
- struct aim_channel *channel;
+ struct aim_channel *c;
if (!iface) {
pr_info("Bad interface pointer\n");
return -EINVAL;
}
- channel = get_channel(iface, channel_id);
- if (!channel)
+ c = get_channel(iface, channel_id);
+ if (!c)
return -ENXIO;
- wake_up_interruptible(&channel->wq);
+ wake_up_interruptible(&c->wq);
return 0;
}
@@ ... @@
struct most_channel_config *cfg,
struct kobject *parent, char *name)
{
- struct aim_channel *channel;
+ struct aim_channel *c;
unsigned long cl_flags;
int retval;
int current_minor;
pr_info("Probing AIM with bad arguments");
return -EINVAL;
}
- channel = get_channel(iface, channel_id);
- if (channel)
+ c = get_channel(iface, channel_id);
+ if (c)
return -EEXIST;
current_minor = ida_simple_get(&minor_id, 0, 0, GFP_KERNEL);
if (current_minor < 0)
return current_minor;
- channel = kzalloc(sizeof(*channel), GFP_KERNEL);
- if (!channel) {
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c) {
retval = -ENOMEM;
goto error_alloc_channel;
}
- channel->devno = MKDEV(major, current_minor);
- cdev_init(&channel->cdev, &channel_fops);
- channel->cdev.owner = THIS_MODULE;
- cdev_add(&channel->cdev, channel->devno, 1);
- channel->iface = iface;
- channel->cfg = cfg;
- channel->channel_id = channel_id;
- channel->mbo_offs = 0;
- atomic_set(&channel->access_ref, -1);
- INIT_KFIFO(channel->fifo);
- retval = kfifo_alloc(&channel->fifo, cfg->num_buffers, GFP_KERNEL);
+ c->devno = MKDEV(major, current_minor);
+ cdev_init(&c->cdev, &channel_fops);
+ c->cdev.owner = THIS_MODULE;
+ cdev_add(&c->cdev, c->devno, 1);
+ c->iface = iface;
+ c->cfg = cfg;
+ c->channel_id = channel_id;
+ c->mbo_offs = 0;
+ atomic_set(&c->access_ref, -1);
+ INIT_KFIFO(c->fifo);
+ retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
if (retval) {
pr_info("failed to alloc channel kfifo");
goto error_alloc_kfifo;
}
- init_waitqueue_head(&channel->wq);
- mutex_init(&channel->io_mutex);
+ init_waitqueue_head(&c->wq);
+ mutex_init(&c->io_mutex);
spin_lock_irqsave(&ch_list_lock, cl_flags);
- list_add_tail(&channel->list, &channel_list);
+ list_add_tail(&c->list, &channel_list);
spin_unlock_irqrestore(&ch_list_lock, cl_flags);
- channel->dev = device_create(aim_class,
+ c->dev = device_create(aim_class,
NULL,
- channel->devno,
+ c->devno,
NULL,
"%s", name);
- retval = IS_ERR(channel->dev);
+ retval = PTR_ERR_OR_ZERO(c->dev);
if (retval) {
pr_info("failed to create new device node %s\n", name);
goto error_create_device;
}
- kobject_uevent(&channel->dev->kobj, KOBJ_ADD);
+ kobject_uevent(&c->dev->kobj, KOBJ_ADD);
return 0;
error_create_device:
- kfifo_free(&channel->fifo);
- list_del(&channel->list);
+ kfifo_free(&c->fifo);
+ list_del(&c->list);
error_alloc_kfifo:
- cdev_del(&channel->cdev);
- kfree(channel);
+ cdev_del(&c->cdev);
+ kfree(c);
error_alloc_channel:
ida_simple_remove(&minor_id, current_minor);
return retval;
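The error labels at the end of aim_probe unwind in exactly the reverse order of acquisition, each label releasing only what was set up before the failing step; note also the IS_ERR() change above, which keeps retval a proper -errno rather than a bare 1 on the device_create() path. Skeleton of the unwind idiom (stepN()/undo_stepN() are placeholders):

	static int my_probe(void)
	{
		int ret;

		ret = step1();		/* e.g. ida_simple_get() */
		if (ret)
			return ret;	/* nothing to undo yet */
		ret = step2();		/* e.g. kfifo_alloc() */
		if (ret)
			goto err_undo1;
		ret = step3();		/* e.g. device_create() */
		if (ret)
			goto err_undo2;
		return 0;

	err_undo2:
		undo_step2();
	err_undo1:
		undo_step1();
		return ret;
	}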
@@ ... @@
static void __exit mod_exit(void)
{
- struct aim_channel *channel, *tmp;
+ struct aim_channel *c, *tmp;
pr_info("exit module\n");
most_deregister_aim(&cdev_aim);
- list_for_each_entry_safe(channel, tmp, &channel_list, list) {
- destroy_cdev(channel);
- kfree(channel);
+ list_for_each_entry_safe(c, tmp, &channel_list, list) {
+ destroy_cdev(c);
+ kfree(c);
}
class_destroy(aim_class);
unregister_chrdev_region(aim_devno, 1);