// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>
struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);
static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	return ret;
}
int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (mapi == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	if (ret)
		iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	int ret;

	mutex_lock(&iio_map_list_lock);
	ret = iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);
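/*
 * Usage sketch (illustrative only, not part of this file): a provider
 * driver can register a NULL-terminated iio_map table at probe time so a
 * platform consumer can later find channels by name. All device and
 * channel names below are hypothetical.
 *
 *	static struct iio_map adc_maps[] = {
 *		IIO_MAP("channel_0", "consumer-dev", "vbat"),
 *		IIO_MAP("channel_1", "consumer-dev", "temp"),
 *		{ }
 *	};
 *
 *	ret = iio_map_array_register(indio_dev, adc_maps);
 *	...
 *	iio_map_array_unregister(indio_dev);
 */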
static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}
#ifdef CONFIG_OF

static int iio_dev_node_match(struct device *dev, const void *data)
{
	return dev->of_node == data && dev->type == &iio_device_type;
}
/**
 * __of_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev:	pointer to the iio_dev structure
 * @iiospec:	IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for most 1:1 mapped
 * channels in IIO chips. This function performs only one sanity check:
 * whether the IIO index is less than num_channels (that is specified in
 * the iio_dev).
 */
static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
				 const struct of_phandle_args *iiospec)
{
	if (!iiospec->args_count)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %u\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}
static int __of_iio_channel_get(struct iio_channel *channel,
				struct device_node *np, int index)
{
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;
	struct of_phandle_args iiospec;

	err = of_parse_phandle_with_args(np, "io-channels",
					 "#io-channel-cells",
					 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
			       iio_dev_node_match);
	of_node_put(iiospec.np);
	if (idev == NULL)
		return -EPROBE_DEFER;

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->of_xlate)
		index = indio_dev->info->of_xlate(indio_dev, &iiospec);
	else
		index = __of_iio_simple_xlate(indio_dev, &iiospec);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}
static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
{
	struct iio_channel *channel;
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL)
		return ERR_PTR(-ENOMEM);

	err = __of_iio_channel_get(channel, np, index);
	if (err)
		goto err_free_channel;

	return channel;

err_free_channel:
	kfree(channel);
	return ERR_PTR(err);
}
struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
					       const char *name)
{
	struct iio_channel *chan = NULL;

	/* Walk up the tree of devices looking for a matching iio channel */
	while (np) {
		int index = 0;

		/*
		 * For named iio channels, first look up the name in the
		 * "io-channel-names" property. If it cannot be found, the
		 * index will be an error code, and of_iio_channel_get()
		 * will fail.
		 */
		if (name)
			index = of_property_match_string(np, "io-channel-names",
							 name);
		chan = of_iio_channel_get(np, index);
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			break;
		else if (name && index >= 0) {
			pr_err("ERROR: could not get IIO channel %pOF:%s(%i)\n",
			       np, name, index);
			return NULL;
		}

		/*
		 * No matching IIO channel found on this node.
		 * If the parent node has an "io-channel-ranges" property,
		 * then we can try one of its channels.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "io-channel-ranges", NULL))
			return NULL;
	}

	return chan;
}
EXPORT_SYMBOL_GPL(of_iio_channel_get_by_name);
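/*
 * Device-tree sketch of what the lookup above consumes (node and channel
 * names are made up for illustration):
 *
 *	adc: adc@48 {
 *		#io-channel-cells = <1>;
 *	};
 *
 *	consumer {
 *		io-channels = <&adc 0>, <&adc 1>;
 *		io-channel-names = "vbat", "temp";
 *	};
 *
 * of_iio_channel_get_by_name(np, "temp") matches "temp" in
 * "io-channel-names" (index 1), then resolves <&adc 1> to channel 1 of
 * the provider via its of_xlate callback or __of_iio_simple_xlate().
 */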
static struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *chans;
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = of_parse_phandle_with_args(dev->of_node,
						 "io-channels",
						 "#io-channel-cells",
						 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)	/* no error, return NULL to search map table */
		return NULL;

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL)
		return ERR_PTR(-ENOMEM);

	/* Search for OF matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
					   mapind);
		if (ret)
			goto error_free_chans;
	}
	return chans;

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
	return ERR_PTR(ret);
}
#else /* CONFIG_OF */

static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	return NULL;
}

#endif /* CONFIG_OF */
static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find the matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);
		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}
struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = of_iio_channel_get_by_name(dev->of_node,
						     channel_name);
		if (channel != NULL)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);
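/*
 * Consumer-side sketch (hypothetical driver code, error handling
 * abbreviated): grab a named channel at probe time and read from it.
 *
 *	struct iio_channel *chan;
 *	int val, ret;
 *
 *	chan = iio_channel_get(&pdev->dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	ret = iio_read_channel_raw(chan, &val);
 *	...
 *	iio_channel_release(chan);
 */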
void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);
static void devm_iio_channel_free(struct device *dev, void *res)
{
	struct iio_channel *channel = *(struct iio_channel **)res;

	iio_channel_release(channel);
}
struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel **ptr, *channel;

	ptr = devres_alloc(devm_iio_channel_free, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel)) {
		devres_free(ptr);
		return channel;
	}

	*ptr = channel;
	devres_add(dev, ptr);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);
struct iio_channel *devm_of_iio_channel_get_by_name(struct device *dev,
						    struct device_node *np,
						    const char *channel_name)
{
	struct iio_channel **ptr, *channel;

	ptr = devres_alloc(devm_iio_channel_free, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	channel = of_iio_channel_get_by_name(np, channel_name);
	if (IS_ERR(channel)) {
		devres_free(ptr);
		return channel;
	}
	if (!channel) {
		/* of_iio_channel_get_by_name() returns NULL on not-found */
		devres_free(ptr);
		return ERR_PTR(-ENODEV);
	}

	*ptr = channel;
	devres_add(dev, ptr);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_of_iio_channel_get_by_name);
struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (dev == NULL)
		return ERR_PTR(-EINVAL);

	chans = of_iio_channel_get_all(dev);
	if (chans)
		return chans;

	name = dev_name(dev);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (chans[mapind].channel == NULL) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	for (i = 0; i < nummaps; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);
void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);
static void devm_iio_channel_free_all(struct device *dev, void *res)
{
	struct iio_channel *channels = *(struct iio_channel **)res;

	iio_channel_release_all(channels);
}
struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel **ptr, *channels;

	ptr = devres_alloc(devm_iio_channel_free_all, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels)) {
		devres_free(ptr);
		return channels;
	}

	*ptr = channels;
	devres_add(dev, ptr);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
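/*
 * The array handed back by iio_channel_get_all() and
 * devm_iio_channel_get_all() is terminated by an entry whose indio_dev is
 * NULL, so consumers can walk it without a separate count (sketch,
 * hypothetical consumer code):
 *
 *	struct iio_channel *chans, *chan;
 *	int val;
 *
 *	chans = devm_iio_channel_get_all(dev);
 *	if (IS_ERR(chans))
 *		return PTR_ERR(chans);
 *	for (chan = chans; chan->indio_dev; chan++)
 *		iio_read_channel_raw(chan, &val);
 */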
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (val2 == NULL)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (chan->indio_dev->info->read_raw_multi) {
		ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
					chan->channel, INDIO_MAX_RAW_ELEMENTS,
					vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else {
		ret = chan->indio_dev->info->read_raw(chan->indio_dev,
					chan->channel, val, val2, info);
	}

	return ret;
}
int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);
int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
	int raw, int *processed, unsigned int scale)
{
	int scale_type, scale_val, scale_val2, offset;
	s64 raw64 = raw;
	int ret;

	ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET);
	if (ret >= 0)
		raw64 += offset;

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * Just pass raw values as processed if no scaling is
		 * available.
		 */
		*processed = raw;
		return 0;
	}

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val * scale;
		else
			*processed = raw64 * scale_val * scale;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val * scale;
		else
			*processed = raw64 * scale_val * scale;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
	int *processed, unsigned int scale)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						    scale);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
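/*
 * Worked example for the conversion above, assuming a hypothetical 12-bit
 * ADC with a 1.8 V reference that reports an IIO_VAL_FRACTIONAL scale of
 * 1800/4096 (millivolts per code) and an offset of 20: for raw = 100,
 * raw64 = 100 + 20 = 120, and with scale = 1 the result is
 * 120 * 1800 / 4096 = 52 mV (truncated). Passing scale = 1000 yields
 * microvolts instead: 120 * 1800 * 1000 / 4096 = 52734.
 */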
int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);
int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
				     unsigned int scale)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
		if (ret < 0)
			goto err_unlock;
		*val *= scale;
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val,
							    scale);
	}

err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);
int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	/* This is just a special case with scale factor 1 */
	return iio_read_channel_processed_scale(chan, val, 1);
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);
int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);
static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
						 vals, type, length, info);
}
int iio_read_avail_channel_attribute(struct iio_channel *chan,
				     const int **vals, int *type, int *length,
				     enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_avail(chan, vals, type, length, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);
int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
					       IIO_CHAN_INFO_RAW);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
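/*
 * Illustrative use (hypothetical consumer code): enumerate the raw values
 * a channel supports, e.g. to pick the nearest legal setting.
 *
 *	const int *vals;
 *	int len, ret;
 *
 *	ret = iio_read_avail_channel_raw(chan, &vals, &len);
 *	if (ret == IIO_AVAIL_LIST)
 *		... vals[0..len - 1] hold discrete raw values ...
 *	else if (ret == IIO_AVAIL_RANGE)
 *		... vals is { min, step, max } ...
 */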
static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	int unused;
	const int *vals;
	int length;
	int ret;

	if (!val2)
		val2 = &unused;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[--length];
			while (length) {
				if (vals[--length] > *val)
					*val = vals[length];
			}
			break;
		default:
			/* FIXME: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return ret;
	}
}
int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;
	int type;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);
int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	int ret = 0;

	/* Need to verify underlying driver has not gone away */
	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);
static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	return chan->indio_dev->info->write_raw(chan->indio_dev,
						chan->channel, val, val2, info);
}
int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);
int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
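/*
 * Output-side sketch (hypothetical DAC consumer): write a mid-scale raw
 * code to a channel obtained with iio_channel_get().
 *
 *	ret = iio_write_channel_raw(dac_chan, 2048);
 */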
unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
	const struct iio_chan_spec_ext_info *ext_info;
	unsigned int i = 0;

	if (!chan->channel->ext_info)
		return i;

	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
		++i;

	return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);
static const struct iio_chan_spec_ext_info *iio_lookup_ext_info(
						const struct iio_channel *chan,
						const char *attr)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!chan->channel->ext_info)
		return NULL;

	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
		if (!strcmp(attr, ext_info->name))
			return ext_info;
	}

	return NULL;
}
ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);
ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);