// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	return ret;
}

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (mapi == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	if (ret)
		iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);

/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	int ret;

	mutex_lock(&iio_map_list_lock);
	ret = iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

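/*
 * Example (sketch, not part of the kernel sources): a provider driver can
 * publish consumer mappings with iio_map_array_register() and drop them on
 * removal. The array is scanned until an entry with a NULL consumer_dev_name
 * is found, so it must end with an empty sentinel. The device and channel
 * names below are hypothetical.
 *
 *	static struct iio_map adc_maps[] = {
 *		{
 *			.consumer_dev_name = "battery-monitor",
 *			.consumer_channel = "vbat",
 *			.adc_channel_label = "CH0",
 *		},
 *		{ },
 *	};
 *
 *	ret = iio_map_array_register(indio_dev, adc_maps);
 *	...
 *	iio_map_array_unregister(indio_dev);
 */
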
static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

#ifdef CONFIG_OF

static int iio_dev_node_match(struct device *dev, const void *data)
{
	return dev->of_node == data && dev->type == &iio_device_type;
}

/**
 * __of_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev:	pointer to the iio_dev structure
 * @iiospec:	IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for the most common case of
 * 1:1 mapped channels in IIO chips. It performs only one sanity check:
 * whether the IIO index is less than num_channels (as specified in the
 * iio_dev).
 */
static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
				 const struct of_phandle_args *iiospec)
{
	if (!iiospec->args_count)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %u\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}

static int __of_iio_channel_get(struct iio_channel *channel,
				struct device_node *np, int index)
{
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;
	struct of_phandle_args iiospec;

	err = of_parse_phandle_with_args(np, "io-channels",
					 "#io-channel-cells",
					 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
			       iio_dev_node_match);
	of_node_put(iiospec.np);
	if (idev == NULL)
		return -EPROBE_DEFER;

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->of_xlate)
		index = indio_dev->info->of_xlate(indio_dev, &iiospec);
	else
		index = __of_iio_simple_xlate(indio_dev, &iiospec);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}

static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
{
	struct iio_channel *channel;
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL)
		return ERR_PTR(-ENOMEM);

	err = __of_iio_channel_get(channel, np, index);
	if (err)
		goto err_free_channel;

	return channel;

err_free_channel:
	kfree(channel);
	return ERR_PTR(err);
}

struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
					       const char *name)
{
	struct iio_channel *chan = NULL;

	/* Walk up the tree of devices looking for a matching iio channel */
	while (np) {
		int index = 0;

		/*
		 * For named iio channels, first look up the name in the
		 * "io-channel-names" property. If it cannot be found, the
		 * index will be an error code, and of_iio_channel_get()
		 * will fail.
		 */
		if (name)
			index = of_property_match_string(np, "io-channel-names",
							 name);
		chan = of_iio_channel_get(np, index);
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			break;
		else if (name && index >= 0) {
			pr_err("ERROR: could not get IIO channel %pOF:%s(%i)\n",
			       np, name ? name : "", index);
			return NULL;
		}

		/*
		 * No matching IIO channel found on this node.
		 * If the parent node has a "io-channel-ranges" property,
		 * then we can try one of its channels.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "io-channel-ranges", NULL))
			return NULL;
	}

	return chan;
}
EXPORT_SYMBOL_GPL(of_iio_channel_get_by_name);

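/*
 * Example (sketch): with a hypothetical consumer node such as
 *
 *	adc: adc@48 {
 *		#io-channel-cells = <1>;
 *	};
 *
 *	sensor {
 *		io-channels = <&adc 2>;
 *		io-channel-names = "temperature";
 *	};
 *
 * a driver bound to the "sensor" node can resolve the channel with
 *
 *	chan = of_iio_channel_get_by_name(np, "temperature");
 *
 * If the name is not found on the node itself, the lookup walks up the
 * parents as long as they carry an "io-channel-ranges" property.
 */
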
static struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *chans;
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = of_parse_phandle_with_args(dev->of_node,
						 "io-channels",
						 "#io-channel-cells",
						 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)	/* no error, return NULL to search map table */
		return NULL;

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL)
		return ERR_PTR(-ENOMEM);

	/* Search for OF matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
					   mapind);
		if (ret)
			goto error_free_chans;
	}
	return chans;

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
	return ERR_PTR(ret);
}

#else /* CONFIG_OF */

static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	return NULL;
}

#endif /* CONFIG_OF */

static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}

struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = of_iio_channel_get_by_name(dev->of_node,
						     channel_name);
		if (channel != NULL)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);

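/*
 * Example (sketch, consumer side): the channel name "vbat" is hypothetical
 * and must match either an "io-channel-names" entry or a registered map.
 *
 *	struct iio_channel *chan;
 *	int val, ret;
 *
 *	chan = iio_channel_get(dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	ret = iio_read_channel_processed(chan, &val);
 *	iio_channel_release(chan);
 */
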
void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

static void devm_iio_channel_free(void *iio_channel)
{
	iio_channel_release(iio_channel);
}

struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);

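/*
 * Example (sketch): in a probe() path the devm variant ties the channel's
 * lifetime to the consumer device, so no explicit iio_channel_release() is
 * needed; "vbat" is again a hypothetical channel name.
 *
 *	chan = devm_iio_channel_get(&pdev->dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */
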
struct iio_channel *devm_of_iio_channel_get_by_name(struct device *dev,
						    struct device_node *np,
						    const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = of_iio_channel_get_by_name(np, channel_name);
	if (IS_ERR(channel))
		return channel;
	if (!channel)
		return ERR_PTR(-ENODEV);

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_of_iio_channel_get_by_name);

struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (dev == NULL)
		return ERR_PTR(-EINVAL);

	chans = of_iio_channel_get_all(dev);
	if (chans)
		return chans;

	name = dev_name(dev);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (chans[mapind].channel == NULL) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	for (i = 0; i < nummaps; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);

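/*
 * Example (sketch): the array returned by iio_channel_get_all() is NULL
 * terminated (an entry with indio_dev == NULL marks the end), so consumers
 * can walk it without being told its length. handle_channel() is a
 * hypothetical per-channel handler.
 *
 *	struct iio_channel *chans, *chan;
 *
 *	chans = iio_channel_get_all(dev);
 *	if (IS_ERR(chans))
 *		return PTR_ERR(chans);
 *	for (chan = chans; chan->indio_dev; chan++)
 *		handle_channel(chan);
 *	iio_channel_release_all(chans);
 */
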
void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static void devm_iio_channel_free_all(void *iio_channels)
{
	iio_channel_release_all(iio_channels);
}

struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *channels;
	int ret;

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels))
		return channels;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free_all,
				       channels);
	if (ret)
		return ERR_PTR(ret);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (val2 == NULL)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (chan->indio_dev->info->read_raw_multi) {
		ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
					chan->channel, INDIO_MAX_RAW_ELEMENTS,
					vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else {
		ret = chan->indio_dev->info->read_raw(chan->indio_dev,
					chan->channel, val, val2, info);
	}

	return ret;
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);

static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
	int raw, int *processed, unsigned int scale)
{
	int scale_type, scale_val, scale_val2, offset;
	s64 raw64 = raw;
	int ret;

	ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
	if (ret >= 0)
		raw64 += offset;

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * Just pass raw values as processed if no scaling is
		 * available.
		 */
		*processed = raw;
		return 0;
	}

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
	int *processed, unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						    scale);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);

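/*
 * Worked example (illustrative numbers only): a raw reading of 1000 on a
 * channel whose scale is reported as IIO_VAL_FRACTIONAL with scale_val = 1250
 * and scale_val2 = 1024, converted with a consumer scale of 1, gives
 *
 *	processed = div_s64(1000 * 1250 * 1, 1024) = 1220
 *
 * i.e. raw * numerator / denominator, computed with 64-bit intermediates so
 * the multiplication cannot overflow.
 */
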
int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);

int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);

int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
				     unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
		if (ret < 0)
			goto err_unlock;
		*val *= scale;
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val,
							    scale);
	}

err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);

int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	/* This is just a special case with scale factor 1 */
	return iio_read_channel_processed_scale(chan, val, 1);
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
						 vals, type, length, info);
}

int iio_read_avail_channel_attribute(struct iio_channel *chan,
				     const int **vals, int *type, int *length,
				     enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_avail(chan, vals, type, length, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);

int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
					       IIO_CHAN_INFO_RAW);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);

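/*
 * Example (sketch): querying the raw values a channel can take. On
 * IIO_AVAIL_LIST the first *length entries of *vals are the selectable raw
 * values; on IIO_AVAIL_RANGE they describe min/step/max. The array belongs
 * to the provider driver and must not be freed by the consumer.
 *
 *	const int *vals;
 *	int len, ret;
 *
 *	ret = iio_read_avail_channel_raw(chan, &vals, &len);
 *	if (ret == IIO_AVAIL_LIST && len > 0)
 *		max_code = vals[len - 1];
 *
 * (max_code is a hypothetical consumer variable; the list is not guaranteed
 * to be sorted, see iio_channel_read_max() below for a full scan.)
 */
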
static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	int unused;
	const int *vals;
	int length;
	int ret;

	if (!val2)
		val2 = &unused;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[--length];
			while (length) {
				if (vals[--length] > *val)
					*val = vals[length];
			}
			break;
		default:
			/* FIXME: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return ret;
	}
}

int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;
	int type;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret = 0;

	/* Need to verify underlying driver has not gone away */
	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);

static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	return chan->indio_dev->info->write_raw(chan->indio_dev,
						chan->channel, val, val2, info);
}

int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);

int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);

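/*
 * Example (sketch): writing a raw code to an output channel, e.g. a DAC; the
 * meaning of the value is device specific and dac_chan is a hypothetical
 * channel obtained with one of the getters above.
 *
 *	ret = iio_write_channel_raw(dac_chan, 2048);
 */
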
unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
	const struct iio_chan_spec_ext_info *ext_info;
	unsigned int i = 0;

	if (!chan->channel->ext_info)
		return i;

	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
		++i;

	return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);

static const struct iio_chan_spec_ext_info *iio_lookup_ext_info(
						const struct iio_channel *chan,
						const char *attr)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!chan->channel->ext_info)
		return NULL;

	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
		if (!strcmp(attr, ext_info->name))
			return ext_info;
	}

	return NULL;
}

ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);

ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
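
/*
 * Example (sketch): extended (string based) channel attributes are addressed
 * by name. "powerdown" is a hypothetical attribute and buf must be large
 * enough for the driver's output (a full page in the sysfs case).
 *
 *	len = iio_read_channel_ext_info(chan, "powerdown", buf);
 *	...
 *	len = iio_write_channel_ext_info(chan, "powerdown", "1", 1);
 */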