/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};
static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}
static bool iio_buffer_data_available(struct iio_buffer *buf)
{
	if (buf->access->data_available)
		return buf->access->data_available(buf);

	return buf->stufftoread;
}
/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 */
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	do {
		if (!iio_buffer_data_available(rb)) {
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			ret = wait_event_interruptible(rb->pollq,
					iio_buffer_data_available(rb) ||
					indio_dev->info == NULL);
			if (ret)
				return ret;
			if (indio_dev->info == NULL)
				return -ENODEV;
		}

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);

	return ret;
}
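/*
 * Usage sketch (hypothetical userspace code, not part of this file): scans
 * are consumed through the character device, so a simple blocking reader
 * looks like:
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	char scan[256];
 *	ssize_t n = read(fd, scan, sizeof(scan));
 *
 * With O_NONBLOCK set, read() fails with EAGAIN instead of sleeping on
 * rb->pollq.
 */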
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_data_available(rb))
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}
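/*
 * Usage sketch (hypothetical userspace code): the POLLIN | POLLRDNORM events
 * reported above pair with a standard poll() loop:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		n = read(fd, scan, sizeof(scan));
 */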
/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);
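/*
 * Usage sketch (hypothetical buffer implementation, names invented): concrete
 * buffers embed struct iio_buffer as their first member (the layout that
 * iio_buffer_read_first_n_outer() above relies on) and initialize it here:
 *
 *	struct my_ring {
 *		struct iio_buffer buffer;	<- must stay the first member
 *		u8 *data;
 *	};
 *
 *	static struct iio_buffer *my_ring_alloc(void)
 *	{
 *		struct my_ring *ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 *
 *		if (!ring)
 *			return NULL;
 *		iio_buffer_init(&ring->buffer);
 *		return &ring->buffer;
 *	}
 */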
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}
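/*
 * Example: a signed 12-bit, little-endian sample stored in 16 bits with a
 * 4-bit right shift reads back through this attribute as "le:s12/16>>4".
 */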
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}
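/*
 * Usage sketch (device and channel names hypothetical): from userspace a
 * channel is enabled via its scan_elements entry while the buffer is off:
 *
 *	echo 1 > /sys/bus/iio/devices/iio:device0/scan_elements/in_voltage0_en
 */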
static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}
static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index", chan,
				     &iio_show_scan_index, NULL,
				     0, IIO_SEPARATE, &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type", chan,
				     &iio_show_fixed_type, NULL,
				     0, 0, &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en", chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index, 0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en", chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index, 0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	return attrcount;
}
static const char * const iio_scan_elements_group_name = "scan_elements";
int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
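/*
 * Usage sketch (hypothetical driver, names invented): a driver allocates its
 * buffer, attaches it to the device, then registers the scan elements for its
 * channel table during probe, unwinding with iio_buffer_unregister():
 *
 *	indio_dev->buffer = my_buffer_alloc(indio_dev);
 *	ret = iio_buffer_register(indio_dev, my_channels,
 *				  ARRAY_SIZE(my_channels));
 *	if (ret)
 *		goto error_free_buffer;
 */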
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
EXPORT_SYMBOL(iio_buffer_unregister);
ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);
ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);
ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
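/*
 * Example (hypothetical driver data): a device that can only capture channels
 * 0+1 together or channels 0-3 together advertises a zero-terminated list:
 *
 *	static const unsigned long my_scan_masks[] = { 0x3, 0xf, 0 };
 *	indio_dev->available_scan_masks = my_scan_masks;
 *
 * Requesting just channel 1 (mask 0x2) then matches 0x3, the first advertised
 * mask it is a subset of.
 */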
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
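/*
 * Worked example: two 16-bit channels followed by a 64-bit timestamp occupy
 * bytes 0-1 and 2-3; the timestamp is then aligned up to offset 8, giving a
 * 16 byte scan rather than the unaligned 12.
 */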
static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}
static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	iio_buffer_put(buffer);
}
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
			&indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}
static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}
static int __iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				return ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				return ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				return -EINVAL;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);

	return ret;
}
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					   indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					   NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);
/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
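/*
 * Usage sketch (hypothetical driver): hardware that samples one channel at a
 * time hooks this validator into its setup ops so wider masks are rejected in
 * iio_scan_mask_set():
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 */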
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);
int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}
static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
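/*
 * Usage sketch (hypothetical triggered-buffer handler, names invented): a
 * driver's pollfunc assembles one scan, laid out according to the active
 * scan mask, and pushes it to all attached buffers:
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u8 scan[32];
 *
 *		my_read_scan(indio_dev, scan);
 *		iio_push_to_buffers(indio_dev, scan);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */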
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
			indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
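/*
 * Worked example: if the device captures channels 0-2 (active_scan_mask 0x7,
 * three 16-bit samples) but this buffer requested only channels 0 and 2
 * (scan_mask 0x5), the table built above copies input bytes 0-1 to output
 * bytes 0-1 and input bytes 4-5 to output bytes 2-3, so readers of this
 * buffer see a densely packed scan.
 */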
int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);
/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}
/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);
/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);