/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
10 #include <linux/export.h>
11 #include <linux/slab.h>
12 #include <linux/mutex.h>
14 #include <linux/iio/iio.h>
16 #include <linux/iio/machine.h>
17 #include <linux/iio/driver.h>
18 #include <linux/iio/consumer.h>
20 struct iio_map_internal {
21 struct iio_dev *indio_dev;
/* Global registry of consumer channel mappings, guarded by the mutex. */
static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);
29 int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
32 struct iio_map_internal *mapi;
37 mutex_lock(&iio_map_list_lock);
38 while (maps[i].consumer_dev_name != NULL) {
39 mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
45 mapi->indio_dev = indio_dev;
46 list_add(&mapi->l, &iio_map_list);
50 mutex_unlock(&iio_map_list_lock);
54 EXPORT_SYMBOL_GPL(iio_map_array_register);
/* Assumes the exact same array (e.g. memory locations)
 * used at unregistration as used at registration rather than
 * more complex checking of contents.
 */
61 int iio_map_array_unregister(struct iio_dev *indio_dev,
66 struct iio_map_internal *mapi;
71 mutex_lock(&iio_map_list_lock);
72 while (maps[i].consumer_dev_name != NULL) {
74 list_for_each_entry(mapi, &iio_map_list, l)
75 if (&maps[i] == mapi->map) {
81 if (found_it == false) {
88 mutex_unlock(&iio_map_list_lock);
92 EXPORT_SYMBOL_GPL(iio_map_array_unregister);
94 static const struct iio_chan_spec
95 *iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
98 const struct iio_chan_spec *chan = NULL;
100 for (i = 0; i < indio_dev->num_channels; i++)
101 if (indio_dev->channels[i].datasheet_name &&
102 strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
103 chan = &indio_dev->channels[i];
110 struct iio_channel *iio_channel_get(const char *name, const char *channel_name)
112 struct iio_map_internal *c_i = NULL, *c = NULL;
113 struct iio_channel *channel;
115 if (name == NULL && channel_name == NULL)
116 return ERR_PTR(-ENODEV);
118 /* first find matching entry the channel map */
119 mutex_lock(&iio_map_list_lock);
120 list_for_each_entry(c_i, &iio_map_list, l) {
121 if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
123 strcmp(channel_name, c_i->map->consumer_channel) != 0))
126 iio_device_get(c->indio_dev);
129 mutex_unlock(&iio_map_list_lock);
131 return ERR_PTR(-ENODEV);
133 channel = kmalloc(sizeof(*channel), GFP_KERNEL);
135 return ERR_PTR(-ENOMEM);
137 channel->indio_dev = c->indio_dev;
139 if (c->map->adc_channel_label)
141 iio_chan_spec_from_name(channel->indio_dev,
142 c->map->adc_channel_label);
146 EXPORT_SYMBOL_GPL(iio_channel_get);
148 void iio_channel_release(struct iio_channel *channel)
150 iio_device_put(channel->indio_dev);
153 EXPORT_SYMBOL_GPL(iio_channel_release);
155 struct iio_channel *iio_channel_get_all(const char *name)
157 struct iio_channel *chans;
158 struct iio_map_internal *c = NULL;
164 return ERR_PTR(-EINVAL);
166 mutex_lock(&iio_map_list_lock);
167 /* first count the matching maps */
168 list_for_each_entry(c, &iio_map_list, l)
169 if (name && strcmp(name, c->map->consumer_dev_name) != 0)
179 /* NULL terminated array to save passing size */
180 chans = kzalloc(sizeof(*chans)*(nummaps + 1), GFP_KERNEL);
186 /* for each map fill in the chans element */
187 list_for_each_entry(c, &iio_map_list, l) {
188 if (name && strcmp(name, c->map->consumer_dev_name) != 0)
190 chans[mapind].indio_dev = c->indio_dev;
191 chans[mapind].channel =
192 iio_chan_spec_from_name(chans[mapind].indio_dev,
193 c->map->adc_channel_label);
194 if (chans[mapind].channel == NULL) {
196 goto error_free_chans;
198 iio_device_get(chans[mapind].indio_dev);
203 goto error_free_chans;
205 mutex_unlock(&iio_map_list_lock);
210 for (i = 0; i < nummaps; i++)
211 iio_device_put(chans[i].indio_dev);
214 mutex_unlock(&iio_map_list_lock);
218 EXPORT_SYMBOL_GPL(iio_channel_get_all);
220 void iio_channel_release_all(struct iio_channel *channels)
222 struct iio_channel *chan = &channels[0];
224 while (chan->indio_dev) {
225 iio_device_put(chan->indio_dev);
230 EXPORT_SYMBOL_GPL(iio_channel_release_all);
232 int iio_read_channel_raw(struct iio_channel *chan, int *val)
236 mutex_lock(&chan->indio_dev->info_exist_lock);
237 if (chan->indio_dev->info == NULL) {
242 ret = chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
245 mutex_unlock(&chan->indio_dev->info_exist_lock);
249 EXPORT_SYMBOL_GPL(iio_read_channel_raw);
251 int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
255 mutex_lock(&chan->indio_dev->info_exist_lock);
256 if (chan->indio_dev->info == NULL) {
261 ret = chan->indio_dev->info->read_raw(chan->indio_dev,
264 IIO_CHAN_INFO_SCALE);
266 mutex_unlock(&chan->indio_dev->info_exist_lock);
270 EXPORT_SYMBOL_GPL(iio_read_channel_scale);
272 int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
275 /* Need to verify underlying driver has not gone away */
277 mutex_lock(&chan->indio_dev->info_exist_lock);
278 if (chan->indio_dev->info == NULL) {
283 *type = chan->channel->type;
285 mutex_unlock(&chan->indio_dev->info_exist_lock);
289 EXPORT_SYMBOL_GPL(iio_get_channel_type);